var/home/core/zuul-output/logs/kubelet.log
Nov 21 13:42:17 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 21 13:42:18 crc restorecon[4758]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized
by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c138,c778 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 
Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc 
restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 21 13:42:18 crc 
restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc 
restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc 
restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 
crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:18 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 
13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 
13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 21 13:42:19 crc 
restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 
13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 
13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc 
restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 21 13:42:19 crc restorecon[4758]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 21 13:42:21 crc kubenswrapper[5133]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 21 13:42:21 crc kubenswrapper[5133]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 21 13:42:21 crc kubenswrapper[5133]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 21 13:42:21 crc kubenswrapper[5133]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 21 13:42:21 crc kubenswrapper[5133]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 21 13:42:21 crc kubenswrapper[5133]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.569465 5133 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574515 5133 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574545 5133 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574555 5133 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574564 5133 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574572 5133 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574580 5133 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574587 5133 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574596 5133 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574605 5133 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574612 5133 feature_gate.go:330] unrecognized feature gate: Example Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574620 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574628 5133 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574636 5133 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574645 5133 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574652 5133 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574660 5133 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574674 5133 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574681 5133 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574690 5133 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574697 5133 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574705 5133 feature_gate.go:330] 
unrecognized feature gate: CSIDriverSharedResource Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574712 5133 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574720 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574729 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574736 5133 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574743 5133 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574752 5133 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574759 5133 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574770 5133 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574779 5133 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574787 5133 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574795 5133 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574803 5133 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574813 5133 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574823 5133 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574832 5133 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574841 5133 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574850 5133 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574859 5133 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574867 5133 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574876 5133 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574886 5133 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574894 5133 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574902 5133 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574913 5133 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574923 5133 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574931 5133 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574939 5133 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574948 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574957 5133 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574965 5133 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574973 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574980 5133 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574988 5133 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.574995 5133 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575029 5133 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575037 5133 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575045 5133 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575053 5133 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575061 5133 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575074 5133 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575084 5133 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575093 5133 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575101 5133 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575110 5133 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575118 5133 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575127 5133 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575136 5133 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575145 5133 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575153 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.575162 5133 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577096 5133 flags.go:64] FLAG: --address="0.0.0.0" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577130 5133 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577145 5133 flags.go:64] FLAG: --anonymous-auth="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577156 5133 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577174 5133 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577183 5133 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577198 5133 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577209 5133 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577218 5133 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577227 5133 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577237 5133 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577247 5133 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577256 5133 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577265 5133 flags.go:64] FLAG: --cgroup-root="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577273 5133 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577283 5133 flags.go:64] FLAG: --client-ca-file="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577292 5133 flags.go:64] FLAG: --cloud-config="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577300 5133 flags.go:64] FLAG: --cloud-provider="" 
Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577309 5133 flags.go:64] FLAG: --cluster-dns="[]" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577320 5133 flags.go:64] FLAG: --cluster-domain="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577330 5133 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577340 5133 flags.go:64] FLAG: --config-dir="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577348 5133 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577358 5133 flags.go:64] FLAG: --container-log-max-files="5" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577369 5133 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577378 5133 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577388 5133 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577397 5133 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577407 5133 flags.go:64] FLAG: --contention-profiling="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577416 5133 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577425 5133 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577434 5133 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577443 5133 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577454 5133 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577464 5133 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577474 5133 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577483 5133 flags.go:64] FLAG: --enable-load-reader="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577492 5133 flags.go:64] FLAG: --enable-server="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577502 5133 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577513 5133 flags.go:64] FLAG: --event-burst="100" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577522 5133 flags.go:64] FLAG: --event-qps="50" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577531 5133 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577541 5133 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577550 5133 flags.go:64] FLAG: --eviction-hard="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577561 5133 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577570 5133 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577579 5133 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577589 5133 flags.go:64] FLAG: 
--eviction-soft="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577598 5133 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577607 5133 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577618 5133 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577627 5133 flags.go:64] FLAG: --experimental-mounter-path="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577636 5133 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577645 5133 flags.go:64] FLAG: --fail-swap-on="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577655 5133 flags.go:64] FLAG: --feature-gates="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577666 5133 flags.go:64] FLAG: --file-check-frequency="20s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577675 5133 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577685 5133 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577694 5133 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577703 5133 flags.go:64] FLAG: --healthz-port="10248" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577712 5133 flags.go:64] FLAG: --help="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577722 5133 flags.go:64] FLAG: --hostname-override="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577731 5133 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577740 5133 flags.go:64] FLAG: --http-check-frequency="20s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577749 5133 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577758 5133 flags.go:64] FLAG: --image-credential-provider-config="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577767 5133 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577776 5133 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577785 5133 flags.go:64] FLAG: --image-service-endpoint="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577794 5133 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577803 5133 flags.go:64] FLAG: --kube-api-burst="100" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577812 5133 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577821 5133 flags.go:64] FLAG: --kube-api-qps="50" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577830 5133 flags.go:64] FLAG: --kube-reserved="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577839 5133 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577848 5133 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577857 5133 flags.go:64] FLAG: --kubelet-cgroups="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577866 5133 flags.go:64] FLAG: 
--local-storage-capacity-isolation="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577876 5133 flags.go:64] FLAG: --lock-file="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577885 5133 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577894 5133 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577903 5133 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577916 5133 flags.go:64] FLAG: --log-json-split-stream="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577926 5133 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577935 5133 flags.go:64] FLAG: --log-text-split-stream="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577946 5133 flags.go:64] FLAG: --logging-format="text" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577957 5133 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577969 5133 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577981 5133 flags.go:64] FLAG: --manifest-url="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.577993 5133 flags.go:64] FLAG: --manifest-url-header="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578037 5133 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578050 5133 flags.go:64] FLAG: --max-open-files="1000000" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578064 5133 flags.go:64] FLAG: --max-pods="110" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578076 5133 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578088 5133 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578099 5133 flags.go:64] FLAG: --memory-manager-policy="None" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578111 5133 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578123 5133 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578135 5133 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578147 5133 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578168 5133 flags.go:64] FLAG: --node-status-max-images="50" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578179 5133 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578188 5133 flags.go:64] FLAG: --oom-score-adj="-999" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578198 5133 flags.go:64] FLAG: --pod-cidr="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578207 5133 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578221 5133 flags.go:64] FLAG: --pod-manifest-path="" Nov 21 13:42:21 crc 
kubenswrapper[5133]: I1121 13:42:21.578230 5133 flags.go:64] FLAG: --pod-max-pids="-1" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578240 5133 flags.go:64] FLAG: --pods-per-core="0" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578249 5133 flags.go:64] FLAG: --port="10250" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578258 5133 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578268 5133 flags.go:64] FLAG: --provider-id="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578276 5133 flags.go:64] FLAG: --qos-reserved="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578286 5133 flags.go:64] FLAG: --read-only-port="10255" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578295 5133 flags.go:64] FLAG: --register-node="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578306 5133 flags.go:64] FLAG: --register-schedulable="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578316 5133 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578330 5133 flags.go:64] FLAG: --registry-burst="10" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578340 5133 flags.go:64] FLAG: --registry-qps="5" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578349 5133 flags.go:64] FLAG: --reserved-cpus="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578358 5133 flags.go:64] FLAG: --reserved-memory="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578369 5133 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578378 5133 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578387 5133 flags.go:64] FLAG: --rotate-certificates="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578396 5133 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578404 5133 flags.go:64] FLAG: --runonce="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578414 5133 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578423 5133 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578432 5133 flags.go:64] FLAG: --seccomp-default="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578442 5133 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578451 5133 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578460 5133 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578469 5133 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578479 5133 flags.go:64] FLAG: --storage-driver-password="root" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578488 5133 flags.go:64] FLAG: --storage-driver-secure="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578497 5133 flags.go:64] FLAG: --storage-driver-table="stats" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578506 5133 flags.go:64] FLAG: --storage-driver-user="root" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578515 5133 flags.go:64] FLAG: 
--streaming-connection-idle-timeout="4h0m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578524 5133 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578533 5133 flags.go:64] FLAG: --system-cgroups="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578542 5133 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578555 5133 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578564 5133 flags.go:64] FLAG: --tls-cert-file="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578573 5133 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578585 5133 flags.go:64] FLAG: --tls-min-version="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578594 5133 flags.go:64] FLAG: --tls-private-key-file="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578603 5133 flags.go:64] FLAG: --topology-manager-policy="none" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578612 5133 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578621 5133 flags.go:64] FLAG: --topology-manager-scope="container" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578630 5133 flags.go:64] FLAG: --v="2" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578642 5133 flags.go:64] FLAG: --version="false" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578655 5133 flags.go:64] FLAG: --vmodule="" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578666 5133 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.578676 5133 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590504 5133 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590532 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590541 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590551 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590561 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590571 5133 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590579 5133 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590587 5133 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590598 5133 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590613 5133 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
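Editor's aside (not part of the captured log): the flags.go:64 lines above dump every effective kubelet command-line flag as FLAG: --name="value". As a sketch only, assuming nothing beyond that visible format, the snippet below rebuilds the dump as a Python dict so individual settings can be looked up instead of scrolled for; kubelet.log is again a placeholder path.

import re

# Matches entries such as:  flags.go:64] FLAG: --node-ip="192.168.126.11"
FLAG_LINE = re.compile(r'flags\.go:\d+\] FLAG: (--[A-Za-z0-9-]+)="([^"]*)"')

def kubelet_flags(path="kubelet.log"):
    """Return {flag: value} from the kubelet FLAG dump in the log."""
    flags = {}
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            for name, value in FLAG_LINE.findall(line):
                flags[name] = value  # keep the last occurrence if a flag is dumped twice
    return flags

if __name__ == "__main__":
    flags = kubelet_flags()
    print(flags.get("--node-ip"))          # "192.168.126.11" in the dump above
    print(flags.get("--system-reserved"))  # "cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
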
Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590626 5133 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590636 5133 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590646 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590657 5133 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590668 5133 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590678 5133 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590687 5133 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590739 5133 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590750 5133 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590760 5133 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590770 5133 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590779 5133 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590788 5133 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590796 5133 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590805 5133 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590813 5133 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590821 5133 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590829 5133 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590840 5133 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590850 5133 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590859 5133 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590868 5133 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590876 5133 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590886 5133 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590896 5133 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590905 5133 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.590913 5133 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591145 5133 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591155 5133 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591173 5133 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591189 5133 feature_gate.go:330] unrecognized feature gate: Example Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591200 5133 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591210 5133 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591220 5133 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591230 5133 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591240 5133 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591252 5133 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591261 5133 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591270 5133 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591279 5133 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591289 5133 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591301 5133 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591311 5133 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591320 5133 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591330 5133 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591338 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591347 5133 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591356 5133 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591365 5133 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591374 5133 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591382 5133 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591391 5133 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591399 5133 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591408 5133 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591418 5133 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591428 5133 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591438 5133 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591485 5133 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591493 5133 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591504 5133 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.591512 5133 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.595760 5133 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.606529 5133 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.606590 5133 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606717 5133 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606731 5133 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606743 5133 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606752 5133 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606762 5133 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606774 5133 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606785 5133 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606794 5133 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606803 5133 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606812 5133 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606821 5133 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606830 5133 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606837 5133 feature_gate.go:330] unrecognized feature gate: Example Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606848 5133 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606859 5133 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606870 5133 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606881 5133 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606890 5133 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606900 5133 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606909 5133 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606918 5133 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606927 5133 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606935 5133 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606944 5133 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606953 5133 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606961 5133 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606971 5133 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606978 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606986 5133 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.606994 5133 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607024 5133 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 
13:42:21.607033 5133 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607040 5133 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607048 5133 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607056 5133 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607064 5133 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607074 5133 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607084 5133 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607093 5133 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607101 5133 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607109 5133 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607117 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607126 5133 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607134 5133 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607142 5133 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607152 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607159 5133 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607168 5133 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607176 5133 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607185 5133 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607192 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607200 5133 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607210 5133 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607218 5133 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607225 5133 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607233 5133 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607241 5133 feature_gate.go:330] unrecognized feature gate: 
IngressControllerDynamicConfigurationManager Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607249 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607257 5133 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607265 5133 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607273 5133 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607281 5133 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607289 5133 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607296 5133 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607304 5133 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607311 5133 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607319 5133 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607326 5133 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607335 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607342 5133 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607351 5133 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.607364 5133 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607655 5133 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607670 5133 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607680 5133 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607690 5133 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607700 5133 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607710 5133 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607718 5133 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607730 5133 
feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607740 5133 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607749 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607758 5133 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607768 5133 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607778 5133 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607787 5133 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607796 5133 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607804 5133 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607812 5133 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607820 5133 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607827 5133 feature_gate.go:330] unrecognized feature gate: Example Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607835 5133 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607843 5133 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607851 5133 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607859 5133 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607866 5133 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607876 5133 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607884 5133 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607892 5133 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607900 5133 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607908 5133 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607915 5133 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607923 5133 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607931 5133 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607939 5133 feature_gate.go:330] unrecognized feature gate: 
PrivateHostedZoneAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607948 5133 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607955 5133 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607964 5133 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607972 5133 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607982 5133 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.607991 5133 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608027 5133 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608041 5133 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608052 5133 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608063 5133 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608075 5133 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608086 5133 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608099 5133 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608109 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608118 5133 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608127 5133 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608135 5133 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608143 5133 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608151 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608159 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608168 5133 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608175 5133 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608183 5133 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608191 5133 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608198 5133 feature_gate.go:330] unrecognized feature gate: 
AdminNetworkPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608206 5133 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608214 5133 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608223 5133 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608231 5133 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608238 5133 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608246 5133 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608254 5133 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608262 5133 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608269 5133 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608279 5133 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608287 5133 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608295 5133 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 21 13:42:21 crc kubenswrapper[5133]: W1121 13:42:21.608302 5133 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.608315 5133 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.608582 5133 server.go:940] "Client rotation is on, will bootstrap in background" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.631365 5133 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.631559 5133 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
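Editor's aside (not part of the captured log): each feature-gate parsing pass above repeats the same block of "unrecognized feature gate: <Name>" warnings and then logs a "feature gates: {map[...]}" summary. The sketch below, assuming only those two message shapes, deduplicates the unrecognized gate names and turns the last summary into a dict of booleans; it is an illustration for reading this log, not part of the kubelet.

import re

UNRECOGNIZED = re.compile(r"unrecognized feature gate: ([A-Za-z0-9]+)")
SUMMARY = re.compile(r"feature gates: \{map\[([^\]]*)\]\}")

def feature_gate_report(path="kubelet.log"):
    """Return (sorted unrecognized gate names, {gate: enabled} from the last summary)."""
    unrecognized, summary = set(), {}
    with open(path, encoding="utf-8", errors="replace") as fh:
        for line in fh:
            unrecognized.update(UNRECOGNIZED.findall(line))
            for body in SUMMARY.findall(line):
                summary = {
                    name: value == "true"
                    for name, value in (item.split(":", 1) for item in body.split())
                }
    return sorted(unrecognized), summary

if __name__ == "__main__":
    gates, enabled = feature_gate_report()
    print(len(gates), "gate names not recognized by this kubelet")
    print("KMSv1 enabled:", enabled.get("KMSv1"))  # True per the summary above
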
Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.650173 5133 server.go:997] "Starting client certificate rotation" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.650216 5133 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.670275 5133 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-14 21:11:55.457550732 +0000 UTC Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.670555 5133 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.772525 5133 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.779471 5133 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 21 13:42:21 crc kubenswrapper[5133]: E1121 13:42:21.807350 5133 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:21 crc kubenswrapper[5133]: I1121 13:42:21.827168 5133 log.go:25] "Validated CRI v1 runtime API" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.074120 5133 log.go:25] "Validated CRI v1 image API" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.100331 5133 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.129774 5133 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-21-13-36-46-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.129859 5133 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}] Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.160145 5133 manager.go:217] Machine: {Timestamp:2025-11-21 13:42:22.154927931 +0000 UTC m=+1.952760219 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:537cb059-79e6-48e5-b353-57bb495db8a2 BootID:eb1f691e-5306-40d5-9666-4e51161aa15a Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 
Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:64:0e:35 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:64:0e:35 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:db:86:4e Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:30:8b:3f Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:e4:e8:96 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:7e:ff:29 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:43:39:40 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:ca:b9:1e:d6:80:fb Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:aa:c9:c7:ca:7d:b5 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] 
SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.160794 5133 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.160995 5133 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.170861 5133 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.171230 5133 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.171276 5133 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.171499 5133 topology_manager.go:138] "Creating topology manager with none policy" Nov 21 13:42:22 crc 
kubenswrapper[5133]: I1121 13:42:22.171511 5133 container_manager_linux.go:303] "Creating device plugin manager" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.172186 5133 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.172223 5133 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.172442 5133 state_mem.go:36] "Initialized new in-memory state store" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.172533 5133 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.191745 5133 kubelet.go:418] "Attempting to sync node with API server" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.191809 5133 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.191890 5133 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.191911 5133 kubelet.go:324] "Adding apiserver pod source" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.191928 5133 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 21 13:42:22 crc kubenswrapper[5133]: W1121 13:42:22.207856 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.208103 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:22 crc kubenswrapper[5133]: W1121 13:42:22.210327 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.210455 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.210407 5133 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.213231 5133 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". 
Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.216020 5133 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229612 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229692 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229708 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229722 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229744 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229758 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229772 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229794 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229809 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229826 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229873 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229888 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.229920 5133 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.230720 5133 server.go:1280] "Started kubelet" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.230862 5133 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.230921 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.231127 5133 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.231885 5133 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 21 13:42:22 crc systemd[1]: Started Kubernetes Kubelet. 
Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.244207 5133 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.244271 5133 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.244657 5133 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.244613 5133 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 03:21:58.686532193 +0000 UTC Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.244762 5133 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1213h39m36.441778566s for next certificate rotation Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.245787 5133 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.245823 5133 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.246199 5133 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.246719 5133 factory.go:55] Registering systemd factory Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.247453 5133 factory.go:221] Registration of the systemd container factory successfully Nov 21 13:42:22 crc kubenswrapper[5133]: W1121 13:42:22.247470 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.247463 5133 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="200ms" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.266131 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.265919 5133 factory.go:153] Registering CRI-O factory Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.267095 5133 factory.go:221] Registration of the crio container factory successfully Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.267252 5133 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.267303 5133 factory.go:103] Registering Raw factory Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.267339 5133 manager.go:1196] Started watching for new ooms in manager Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.268846 5133 manager.go:319] Starting recovery of all containers Nov 21 13:42:22 crc kubenswrapper[5133]: 
I1121 13:42:22.296388 5133 server.go:460] "Adding debug handlers to kubelet server" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306404 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306513 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306548 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306567 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306582 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306597 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306611 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306628 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306649 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306677 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306695 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306710 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306724 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306745 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306760 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306820 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306837 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306861 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306877 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306892 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306908 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306924 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.306976 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307017 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307040 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307059 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307085 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307103 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307121 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307141 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307156 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307172 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307189 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307205 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307220 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307237 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307253 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307270 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307287 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307302 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307319 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307336 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307354 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307370 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307385 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307403 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307419 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307433 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307450 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307466 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307481 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307496 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307519 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307536 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307565 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" 
volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307581 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307602 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307621 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307636 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307651 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307670 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307685 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307700 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307718 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307733 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307750 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307765 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307784 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307801 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307817 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307833 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307848 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307867 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307885 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307900 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307916 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307933 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307952 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307969 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.307984 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308022 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308041 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308058 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308073 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308089 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308104 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308122 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308141 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" 
volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308160 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308176 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308192 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308209 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308226 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308243 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308260 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308277 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308297 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308313 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308331 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308348 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308366 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308383 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308401 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308420 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.308453 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.305409 5133 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.179:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a09670a747164 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-21 13:42:22.230655332 +0000 UTC m=+2.028487610,LastTimestamp:2025-11-21 13:42:22.230655332 +0000 UTC m=+2.028487610,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316284 5133 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316322 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316341 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316356 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316370 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316385 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316400 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316417 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316432 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316446 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316460 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316473 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316485 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316498 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316511 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316526 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316539 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316552 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316582 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316596 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316610 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316623 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316636 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316651 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" 
volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316666 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316680 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316693 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316712 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316724 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316737 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316750 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316764 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316779 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316796 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316870 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316889 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316907 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316924 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316941 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316958 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316975 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.316991 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317031 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317049 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317066 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317088 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317108 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317125 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317142 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317158 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317174 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317191 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317208 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317224 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317239 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317257 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317311 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" 
volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317327 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317340 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317355 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317369 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317386 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317400 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317415 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317428 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317442 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317455 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317470 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317483 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317496 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317509 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317522 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317535 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317549 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317564 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317578 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317594 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317608 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317622 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317636 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317649 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317664 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317678 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317695 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317708 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317722 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317737 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317751 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317764 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317779 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317793 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317807 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317821 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317837 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317850 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317874 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317888 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317901 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317931 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317946 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317960 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317974 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.317987 5133 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.318021 5133 reconstruct.go:97] "Volume reconstruction finished" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.318036 5133 reconciler.go:26] "Reconciler: start to sync state" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.332224 5133 manager.go:324] Recovery completed Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.342995 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.344984 5133 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.345480 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.345518 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.345530 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.346630 5133 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.346649 5133 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.346696 5133 state_mem.go:36] "Initialized new in-memory state store" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.445308 5133 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.452511 5133 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.456204 5133 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.456274 5133 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.456311 5133 kubelet.go:2335] "Starting kubelet main sync loop" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.456386 5133 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 21 13:42:22 crc kubenswrapper[5133]: W1121 13:42:22.457670 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.457733 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.468138 5133 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="400ms" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.546138 5133 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.556501 5133 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.646823 5133 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.712381 5133 policy_none.go:49] "None policy: Start" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.713787 5133 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.713888 5133 state_mem.go:35] "Initializing new in-memory state store" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.747080 5133 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.757339 5133 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.847867 5133 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.868959 5133 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="800ms" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.914289 5133 manager.go:334] "Starting Device Plugin manager" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.914819 5133 manager.go:513] "Failed to read data from checkpoint" 
checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.914864 5133 server.go:79] "Starting device plugin registration server" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.915639 5133 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.915673 5133 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.915813 5133 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.915952 5133 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 21 13:42:22 crc kubenswrapper[5133]: I1121 13:42:22.915975 5133 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 21 13:42:22 crc kubenswrapper[5133]: E1121 13:42:22.939489 5133 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.016925 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.019226 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.019284 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.019297 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.019361 5133 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 13:42:23 crc kubenswrapper[5133]: E1121 13:42:23.020131 5133 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.158058 5133 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.158197 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.159893 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.160157 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.160176 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.160413 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.160925 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.161080 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.161633 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.161705 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.161730 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.161916 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.162140 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.162215 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.162595 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.162644 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.162667 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.163426 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.163521 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.163544 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.163819 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.163969 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.164072 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165118 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165245 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165280 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165369 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165413 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165439 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165656 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165805 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165846 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.165953 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.166049 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.166074 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.167420 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.167465 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.167439 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.167493 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.167526 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.167545 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.167771 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.167799 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.169383 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.169440 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.169483 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: W1121 13:42:23.187199 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:23 crc kubenswrapper[5133]: E1121 13:42:23.187315 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.221263 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.223201 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.223271 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.223291 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.223335 5133 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 13:42:23 crc kubenswrapper[5133]: E1121 13:42:23.224056 5133 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.231667 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.335820 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.335902 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.335936 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.335968 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336040 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336079 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336332 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336417 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336461 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336492 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336542 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " 
pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336590 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336639 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336674 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.336770 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438511 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438610 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438666 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438711 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438751 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:23 crc 
kubenswrapper[5133]: I1121 13:42:23.438828 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438870 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438879 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438915 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438956 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438979 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438995 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438872 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.438908 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439092 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439184 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439335 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439410 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439417 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439498 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439534 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439501 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439585 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439613 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439616 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") 
" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439648 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439686 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.439905 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.440036 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.440192 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.496245 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: W1121 13:42:23.506461 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:23 crc kubenswrapper[5133]: E1121 13:42:23.506582 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.510143 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.537966 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.567241 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.580754 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.625100 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.626674 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.626727 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.626742 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.626787 5133 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 13:42:23 crc kubenswrapper[5133]: E1121 13:42:23.627600 5133 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc" Nov 21 13:42:23 crc kubenswrapper[5133]: E1121 13:42:23.671249 5133 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="1.6s" Nov 21 13:42:23 crc kubenswrapper[5133]: W1121 13:42:23.677773 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:23 crc kubenswrapper[5133]: E1121 13:42:23.677878 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:23 crc kubenswrapper[5133]: W1121 13:42:23.729747 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:23 crc kubenswrapper[5133]: E1121 13:42:23.729892 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:23 crc kubenswrapper[5133]: W1121 13:42:23.766141 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-decb1dbe67c09c5f5edd326eac1c4e7af9a48d42ee1b1e20713b8b5ac4ec33f6 WatchSource:0}: Error finding container decb1dbe67c09c5f5edd326eac1c4e7af9a48d42ee1b1e20713b8b5ac4ec33f6: Status 404 returned error can't find the container with id decb1dbe67c09c5f5edd326eac1c4e7af9a48d42ee1b1e20713b8b5ac4ec33f6 Nov 21 13:42:23 crc 
kubenswrapper[5133]: W1121 13:42:23.770231 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-16820ddc7b033a213727420948946ab4465a2aa75da38507097172acbae878f8 WatchSource:0}: Error finding container 16820ddc7b033a213727420948946ab4465a2aa75da38507097172acbae878f8: Status 404 returned error can't find the container with id 16820ddc7b033a213727420948946ab4465a2aa75da38507097172acbae878f8 Nov 21 13:42:23 crc kubenswrapper[5133]: W1121 13:42:23.776958 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-fcdab4625c3b67c324885686278d8e9d198c5e877ebd8195c58e88f88c54401f WatchSource:0}: Error finding container fcdab4625c3b67c324885686278d8e9d198c5e877ebd8195c58e88f88c54401f: Status 404 returned error can't find the container with id fcdab4625c3b67c324885686278d8e9d198c5e877ebd8195c58e88f88c54401f Nov 21 13:42:23 crc kubenswrapper[5133]: W1121 13:42:23.778944 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-24420d5b8bd9fc939fd46d2ae021b945e35ac8f1b57ba32752e88e171d552eee WatchSource:0}: Error finding container 24420d5b8bd9fc939fd46d2ae021b945e35ac8f1b57ba32752e88e171d552eee: Status 404 returned error can't find the container with id 24420d5b8bd9fc939fd46d2ae021b945e35ac8f1b57ba32752e88e171d552eee Nov 21 13:42:23 crc kubenswrapper[5133]: W1121 13:42:23.792397 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-3f98de888f29b1d7f183373957524df69d83ef239573f97c72203cf10bd5e131 WatchSource:0}: Error finding container 3f98de888f29b1d7f183373957524df69d83ef239573f97c72203cf10bd5e131: Status 404 returned error can't find the container with id 3f98de888f29b1d7f183373957524df69d83ef239573f97c72203cf10bd5e131 Nov 21 13:42:23 crc kubenswrapper[5133]: I1121 13:42:23.992719 5133 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 21 13:42:23 crc kubenswrapper[5133]: E1121 13:42:23.994753 5133 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.232180 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.428029 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.430733 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.430791 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" 
Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.430809 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.430845 5133 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 13:42:24 crc kubenswrapper[5133]: E1121 13:42:24.431489 5133 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc" Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.464753 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"16820ddc7b033a213727420948946ab4465a2aa75da38507097172acbae878f8"} Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.466561 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"decb1dbe67c09c5f5edd326eac1c4e7af9a48d42ee1b1e20713b8b5ac4ec33f6"} Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.468164 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3f98de888f29b1d7f183373957524df69d83ef239573f97c72203cf10bd5e131"} Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.469545 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"24420d5b8bd9fc939fd46d2ae021b945e35ac8f1b57ba32752e88e171d552eee"} Nov 21 13:42:24 crc kubenswrapper[5133]: I1121 13:42:24.471404 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fcdab4625c3b67c324885686278d8e9d198c5e877ebd8195c58e88f88c54401f"} Nov 21 13:42:25 crc kubenswrapper[5133]: I1121 13:42:25.232118 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:25 crc kubenswrapper[5133]: E1121 13:42:25.272578 5133 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="3.2s" Nov 21 13:42:25 crc kubenswrapper[5133]: W1121 13:42:25.314809 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:25 crc kubenswrapper[5133]: E1121 13:42:25.314920 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:25 
crc kubenswrapper[5133]: W1121 13:42:25.786567 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:25 crc kubenswrapper[5133]: E1121 13:42:25.786708 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.032400 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.033975 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.034061 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.034079 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.034116 5133 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 13:42:26 crc kubenswrapper[5133]: E1121 13:42:26.034786 5133 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc" Nov 21 13:42:26 crc kubenswrapper[5133]: W1121 13:42:26.188922 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:26 crc kubenswrapper[5133]: E1121 13:42:26.189088 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.232484 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.480126 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57"} Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.480241 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.482061 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 
13:42:26.482113 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.482130 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.483211 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f81d70bdc9e1fe1b6f4de2c9a8f2f3163151b75ed1fa2a1052397ac0f04e2630"} Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.483295 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.484439 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.484496 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.484514 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.485535 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182"} Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.487443 5133 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5" exitCode=0 Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.487493 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5"} Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.487524 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.488476 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.488515 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.488533 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.489072 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47"} Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.489149 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.490181 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:26 crc 
kubenswrapper[5133]: I1121 13:42:26.490222 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:26 crc kubenswrapper[5133]: I1121 13:42:26.490238 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:26 crc kubenswrapper[5133]: W1121 13:42:26.528609 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:26 crc kubenswrapper[5133]: E1121 13:42:26.528724 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.232141 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.495604 5133 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57" exitCode=0 Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.495672 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57"} Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.495824 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.497633 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.497696 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.497716 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.498338 5133 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f81d70bdc9e1fe1b6f4de2c9a8f2f3163151b75ed1fa2a1052397ac0f04e2630" exitCode=0 Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.498446 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f81d70bdc9e1fe1b6f4de2c9a8f2f3163151b75ed1fa2a1052397ac0f04e2630"} Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.498510 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.502070 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.503869 5133 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.503904 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.503915 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.503921 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.505162 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.505202 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.505221 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.506041 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.506117 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:27 crc kubenswrapper[5133]: I1121 13:42:27.506137 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.160494 5133 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 21 13:42:28 crc kubenswrapper[5133]: E1121 13:42:28.162926 5133 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.232409 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:28 crc kubenswrapper[5133]: E1121 13:42:28.473842 5133 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.179:6443: connect: connection refused" interval="6.4s" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.505284 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984"} Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.508949 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"8e8fe024377d7d7d21f5e1826640081a7cada1c3ac389b94deccdc80360afc56"} Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.509159 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.511326 5133 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47" exitCode=0 Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.511425 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47"} Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.511575 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.511898 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.511961 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.511986 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.513144 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.513196 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.513213 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.515801 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2"} Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.518228 5133 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2872ef4bbb23a67a9e6e7879b1d32391213af8387cfc4706062a8d034886b119" exitCode=0 Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.518272 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2872ef4bbb23a67a9e6e7879b1d32391213af8387cfc4706062a8d034886b119"} Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.518383 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.519477 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.519518 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:28 crc kubenswrapper[5133]: I1121 13:42:28.519536 5133 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.232523 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.235672 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.242145 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.242210 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.242230 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.242342 5133 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 13:42:29 crc kubenswrapper[5133]: E1121 13:42:29.243172 5133 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.179:6443: connect: connection refused" node="crc" Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.527709 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd"} Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.531651 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.531930 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd"} Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.533077 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.533123 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:29 crc kubenswrapper[5133]: I1121 13:42:29.533141 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:29 crc kubenswrapper[5133]: W1121 13:42:29.992697 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:29 crc kubenswrapper[5133]: E1121 13:42:29.993165 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:30 crc kubenswrapper[5133]: 
I1121 13:42:30.231876 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:30 crc kubenswrapper[5133]: W1121 13:42:30.328081 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:30 crc kubenswrapper[5133]: E1121 13:42:30.328230 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.541969 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f53777cc5019e8fd3118cc1369a41c30ac4f1ad998d703999d0ece15a3f65fd9"} Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.542079 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e"} Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.542100 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574"} Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.546271 5133 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="506215bb84e36497eada7706b0c001abeb13965d5d495e567fcad8cda9513b77" exitCode=0 Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.546441 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"506215bb84e36497eada7706b0c001abeb13965d5d495e567fcad8cda9513b77"} Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.546458 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.548044 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.548103 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.548129 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.551287 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12"} Nov 21 13:42:30 crc 
kubenswrapper[5133]: I1121 13:42:30.551330 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.553400 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.553461 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.553482 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.556802 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2"} Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.556856 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932"} Nov 21 13:42:30 crc kubenswrapper[5133]: I1121 13:42:30.556893 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717"} Nov 21 13:42:30 crc kubenswrapper[5133]: W1121 13:42:30.930833 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:30 crc kubenswrapper[5133]: E1121 13:42:30.930967 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:31 crc kubenswrapper[5133]: W1121 13:42:31.061380 5133 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:31 crc kubenswrapper[5133]: E1121 13:42:31.061523 5133 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.179:6443: connect: connection refused" logger="UnhandledError" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.232115 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:31 crc kubenswrapper[5133]: E1121 13:42:31.338140 5133 event.go:368] "Unable to write event (may retry after sleeping)" err="Post 
\"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.179:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a09670a747164 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-21 13:42:22.230655332 +0000 UTC m=+2.028487610,LastTimestamp:2025-11-21 13:42:22.230655332 +0000 UTC m=+2.028487610,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.566645 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.567098 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"3e225addce076ff44d60080c32d1577eb870905e26a061a1b529f785b15068d8"} Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.567124 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"155959644e9177f102722679d2a6db2bf8c5712f583d520a6d62eebd30fb0425"} Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.567132 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0a6a52d576d88fd0e14affcf29b7438d7dacee5296192c0e08ed2179d90566dd"} Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.567178 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.567456 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.568076 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.568104 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.568113 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.568532 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.568556 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.568563 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.569110 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.569153 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:31 crc kubenswrapper[5133]: I1121 13:42:31.569163 5133 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.232547 5133 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.179:6443: connect: connection refused Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.570301 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.572611 5133 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f53777cc5019e8fd3118cc1369a41c30ac4f1ad998d703999d0ece15a3f65fd9" exitCode=255 Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.572679 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f53777cc5019e8fd3118cc1369a41c30ac4f1ad998d703999d0ece15a3f65fd9"} Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.572813 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.573774 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.573795 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.573804 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.574333 5133 scope.go:117] "RemoveContainer" containerID="f53777cc5019e8fd3118cc1369a41c30ac4f1ad998d703999d0ece15a3f65fd9" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.579097 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c634f40816447ef5d9462c07a81da9bbb425af90bcb45653d7e235e2aea4c7ce"} Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.579122 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"5c3ddea70dc793bad748a809e45cf43bafc8d0016300b9cc6f76681765297b6d"} Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.579202 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.579778 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.579796 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:32 crc kubenswrapper[5133]: I1121 13:42:32.579804 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:32 crc kubenswrapper[5133]: E1121 13:42:32.940590 5133 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 21 
13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.311880 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.583783 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.586104 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73"} Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.586200 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.586219 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.586338 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.587092 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.587174 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.587195 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.587253 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.587279 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:33 crc kubenswrapper[5133]: I1121 13:42:33.587290 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:34 crc kubenswrapper[5133]: I1121 13:42:34.588799 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:34 crc kubenswrapper[5133]: I1121 13:42:34.588826 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:34 crc kubenswrapper[5133]: I1121 13:42:34.588799 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:34 crc kubenswrapper[5133]: I1121 13:42:34.589881 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:34 crc kubenswrapper[5133]: I1121 13:42:34.589897 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:34 crc kubenswrapper[5133]: I1121 13:42:34.589915 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:34 crc kubenswrapper[5133]: I1121 13:42:34.589920 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:34 crc kubenswrapper[5133]: I1121 13:42:34.589926 5133 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:34 crc kubenswrapper[5133]: I1121 13:42:34.589932 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.422933 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.591597 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.592783 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.592868 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.592887 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.643662 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.644914 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.644947 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.644959 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.644979 5133 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.691422 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.691794 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.694901 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.694979 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.695001 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.703476 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.755762 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.756162 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.758074 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:35 crc 
kubenswrapper[5133]: I1121 13:42:35.758133 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:35 crc kubenswrapper[5133]: I1121 13:42:35.758150 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.211345 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.349350 5133 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.596029 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.596083 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.596374 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.597184 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.597228 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.597241 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.597629 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.597708 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.597728 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:36 crc kubenswrapper[5133]: I1121 13:42:36.601116 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:37 crc kubenswrapper[5133]: I1121 13:42:37.598975 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:37 crc kubenswrapper[5133]: I1121 13:42:37.599230 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:37 crc kubenswrapper[5133]: I1121 13:42:37.600019 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:37 crc kubenswrapper[5133]: I1121 13:42:37.600072 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:37 crc kubenswrapper[5133]: I1121 13:42:37.600088 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:37 crc kubenswrapper[5133]: I1121 13:42:37.600103 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:37 crc kubenswrapper[5133]: I1121 13:42:37.600111 5133 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:37 crc kubenswrapper[5133]: I1121 13:42:37.600124 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:38 crc kubenswrapper[5133]: I1121 13:42:38.601879 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:38 crc kubenswrapper[5133]: I1121 13:42:38.603156 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:38 crc kubenswrapper[5133]: I1121 13:42:38.603213 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:38 crc kubenswrapper[5133]: I1121 13:42:38.603230 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:39 crc kubenswrapper[5133]: I1121 13:42:39.330682 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:42:39 crc kubenswrapper[5133]: I1121 13:42:39.330963 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:39 crc kubenswrapper[5133]: I1121 13:42:39.334310 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:39 crc kubenswrapper[5133]: I1121 13:42:39.334364 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:39 crc kubenswrapper[5133]: I1121 13:42:39.334377 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.409497 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.409670 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.411008 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.411069 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.411082 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.530738 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.605979 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.607024 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.607132 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:40 crc kubenswrapper[5133]: I1121 13:42:40.607151 5133 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:42 crc kubenswrapper[5133]: I1121 13:42:42.552949 5133 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 21 13:42:42 crc kubenswrapper[5133]: I1121 13:42:42.553197 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 21 13:42:42 crc kubenswrapper[5133]: I1121 13:42:42.561349 5133 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]log ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]etcd ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/openshift.io-api-request-count-filter ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/openshift.io-startkubeinformers ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/generic-apiserver-start-informers ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/priority-and-fairness-config-consumer ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/priority-and-fairness-filter ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/start-apiextensions-informers ok Nov 21 13:42:42 crc kubenswrapper[5133]: [-]poststarthook/start-apiextensions-controllers failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: [-]poststarthook/crd-informer-synced failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/start-system-namespaces-controller ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/start-cluster-authentication-info-controller ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/start-legacy-token-tracking-controller ok Nov 21 13:42:42 crc kubenswrapper[5133]: [-]poststarthook/start-service-ip-repair-controllers failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: [-]poststarthook/priority-and-fairness-config-producer failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: 
[-]poststarthook/bootstrap-controller failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/start-kube-aggregator-informers ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/apiservice-status-local-available-controller ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/apiservice-status-remote-available-controller ok Nov 21 13:42:42 crc kubenswrapper[5133]: [-]poststarthook/apiservice-registration-controller failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/apiservice-wait-for-first-sync ok Nov 21 13:42:42 crc kubenswrapper[5133]: [-]poststarthook/apiservice-discovery-controller failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/kube-apiserver-autoregistration ok Nov 21 13:42:42 crc kubenswrapper[5133]: [-]autoregister-completion failed: reason withheld Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/apiservice-openapi-controller ok Nov 21 13:42:42 crc kubenswrapper[5133]: [+]poststarthook/apiservice-openapiv3-controller ok Nov 21 13:42:42 crc kubenswrapper[5133]: livez check failed Nov 21 13:42:42 crc kubenswrapper[5133]: I1121 13:42:42.561437 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:42:42 crc kubenswrapper[5133]: E1121 13:42:42.941467 5133 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 21 13:42:43 crc kubenswrapper[5133]: I1121 13:42:43.410283 5133 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 21 13:42:43 crc kubenswrapper[5133]: I1121 13:42:43.410368 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 21 13:42:43 crc kubenswrapper[5133]: I1121 13:42:43.820444 5133 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 21 13:42:43 crc kubenswrapper[5133]: I1121 13:42:43.820513 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 21 13:42:45 crc kubenswrapper[5133]: I1121 13:42:45.796566 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 21 13:42:45 crc kubenswrapper[5133]: 
I1121 13:42:45.796842 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:45 crc kubenswrapper[5133]: I1121 13:42:45.799462 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:45 crc kubenswrapper[5133]: I1121 13:42:45.799539 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:45 crc kubenswrapper[5133]: I1121 13:42:45.799563 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:45 crc kubenswrapper[5133]: I1121 13:42:45.817363 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.216689 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.216953 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.217524 5133 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.218314 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.218891 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.218984 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.219043 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.224970 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.621859 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.621924 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.622311 5133 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.622414 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.623460 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.623502 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.623518 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.624278 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.624351 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:46 crc kubenswrapper[5133]: I1121 13:42:46.624368 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:47 crc kubenswrapper[5133]: E1121 13:42:47.543763 5133 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="7s" Nov 21 13:42:47 crc kubenswrapper[5133]: I1121 13:42:47.548825 5133 trace.go:236] Trace[1462787071]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (21-Nov-2025 13:42:36.673) (total time: 10875ms): Nov 21 13:42:47 crc kubenswrapper[5133]: Trace[1462787071]: ---"Objects listed" error: 10875ms (13:42:47.548) Nov 21 13:42:47 crc kubenswrapper[5133]: Trace[1462787071]: [10.875103863s] [10.875103863s] END Nov 21 13:42:47 crc kubenswrapper[5133]: I1121 13:42:47.548868 5133 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 21 13:42:47 crc kubenswrapper[5133]: I1121 13:42:47.549157 5133 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 21 13:42:47 crc kubenswrapper[5133]: E1121 13:42:47.549248 5133 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 21 13:42:47 crc kubenswrapper[5133]: I1121 13:42:47.549585 5133 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 21 13:42:47 crc kubenswrapper[5133]: I1121 13:42:47.551160 5133 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 21 13:42:47 crc kubenswrapper[5133]: I1121 13:42:47.577895 5133 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.208310 5133 apiserver.go:52] "Watching apiserver" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.212646 5133 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.212958 5133 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"] Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.213496 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.213611 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.213715 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.213729 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.213909 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.213508 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.213763 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.213566 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.214234 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.219817 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.220412 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.221111 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.222383 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.222391 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.223055 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.223321 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.223385 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.224936 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.247037 5133 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253122 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253184 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253227 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253252 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253276 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253310 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253345 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253381 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253429 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253461 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253489 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253518 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253558 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253586 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253618 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: 
\"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253645 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253670 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253698 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253677 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253726 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253757 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253787 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253773 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253813 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253844 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253876 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253903 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253913 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.253935 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254014 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254046 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254076 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254106 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254149 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254172 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254204 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254235 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254267 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254297 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254328 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254358 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254357 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254364 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254387 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254420 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254454 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254496 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254582 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254609 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254609 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254634 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254634 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254648 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254652 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254752 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254775 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254794 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254812 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254834 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254853 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254872 5133 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254877 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254891 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254909 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254930 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254947 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254967 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254978 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.254989 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255116 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255136 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255205 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255218 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255288 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255331 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255369 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255411 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255445 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255479 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255514 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255548 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255582 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255618 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255652 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255684 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255718 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255758 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" 
(UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255810 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255862 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255894 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255924 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255976 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256043 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256082 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256115 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256156 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256201 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" 
(UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256260 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256314 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256367 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256428 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256478 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256521 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256554 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256588 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256621 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256655 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256691 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256725 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256764 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256814 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256912 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256947 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255233 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.258900 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255292 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255338 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255431 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255610 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255617 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255630 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255807 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255810 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255833 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.255843 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256022 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256079 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.259110 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256096 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256272 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256296 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256326 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256537 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256519 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256623 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256741 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256769 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256843 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256928 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.257268 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.257742 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.258343 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.258400 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.258680 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.258739 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.259341 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.258783 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.259406 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.259625 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.258898 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:42:48.758875129 +0000 UTC m=+28.556707457 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.256992 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.259178 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.259225 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.259785 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.259862 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.260706 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.260763 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.260872 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.261035 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.261134 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.261193 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.261417 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.261479 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.261517 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.261550 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262054 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262107 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262142 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262177 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262210 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262246 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: 
\"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262279 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262314 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262346 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262380 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262414 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262447 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262504 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263421 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263465 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263501 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263536 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263574 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263610 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263643 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263676 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263711 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263744 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263780 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263819 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278144 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278208 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278246 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278269 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278301 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278332 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278355 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278382 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278413 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278444 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278468 5133 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278494 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278521 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278545 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278573 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278604 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278634 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278683 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278719 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278749 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 
13:42:48.278773 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278801 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278830 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278857 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278884 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278912 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278947 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.278972 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279016 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279049 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279075 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279105 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279135 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279160 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279188 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279218 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279248 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279275 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279307 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279334 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279362 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279389 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279421 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279450 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279476 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279506 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279537 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279578 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279608 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279636 5133 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279669 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279696 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279720 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279747 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279779 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279807 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279832 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279860 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279902 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 
13:42:48.279928 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279954 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279982 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280031 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280085 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280115 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280181 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280217 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280250 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280285 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: 
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280318 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280347 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280380 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280411 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280438 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280470 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280497 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280527 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " 
pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280557 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280583 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280687 5133 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280703 5133 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280716 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280729 5133 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280747 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280760 5133 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280775 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280796 5133 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280812 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280826 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath 
\"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280839 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280856 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280869 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280883 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280897 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280915 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280928 5133 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280942 5133 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280961 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280975 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280987 5133 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281034 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281055 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath 
\"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281069 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281083 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281098 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281116 5133 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281129 5133 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281142 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281156 5133 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281182 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281195 5133 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281209 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281228 5133 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281242 5133 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281257 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 21 
13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281269 5133 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281287 5133 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281300 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281313 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281326 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283355 5133 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.286643 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.261421 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299684 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.261599 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262568 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.262900 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.263568 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.264472 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.264885 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.265281 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.265386 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.265560 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.265730 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.266029 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.266323 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.266399 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.266475 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.266493 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.266654 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.277428 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.277589 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.279872 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280227 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280338 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280533 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.280736 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.281123 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.282303 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.282709 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.282738 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.282791 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.282967 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283060 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283463 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283487 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283581 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283591 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283014 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283688 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283372 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.283876 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.285194 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.285427 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.285471 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.285677 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.285800 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.285942 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.286100 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.286162 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.286300 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). 
InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.286304 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.286340 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.285893 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.286646 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.286801 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.286813 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.287448 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.287505 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.287839 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.288107 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.288127 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299078 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299220 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299249 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299439 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299575 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299654 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299739 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299932 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.299937 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.300073 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.300217 5133 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.300325 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.300536 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.300613 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.300868 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.300911 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.301147 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.301420 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.301910 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.302147 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.302400 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.302624 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.302718 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.302935 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.302941 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.304038 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.304396 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.304699 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.305117 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.305396 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.305488 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.305654 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.305837 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.305831 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.305879 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.305923 5133 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.306023 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:48.805978887 +0000 UTC m=+28.603811145 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.306042 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.306054 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.306219 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:48.806209003 +0000 UTC m=+28.604041261 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.306274 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.306408 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.306466 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.306630 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.306910 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.307042 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.307051 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.307577 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.307617 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.307902 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.308206 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.308289 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.308788 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.308806 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.309134 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.309764 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). 
InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.309801 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.309833 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.310050 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.310589 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.311051 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.310171 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.312648 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.313117 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.315897 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.316309 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.316689 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.316717 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.316735 5133 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.316796 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:48.816775452 +0000 UTC m=+28.614607710 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.317312 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.317391 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.319982 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.320045 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.322244 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.322974 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.323402 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.325090 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.325117 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.325134 5133 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.325190 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:48.825173471 +0000 UTC m=+28.623005709 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.325459 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.325854 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.326317 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.326524 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.327272 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.327733 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.327751 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.330303 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.331170 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.333035 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.333802 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.340502 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.341150 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.353060 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.354126 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.356290 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.359606 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.365212 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382454 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382502 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382562 5133 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382575 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382585 5133 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382596 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382605 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382614 5133 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382622 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382631 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382643 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382651 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382661 5133 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382669 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382677 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382700 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382725 5133 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382738 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382748 5133 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382758 5133 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382769 5133 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382779 5133 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: 
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382789 5133 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382790 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382799 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382827 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382837 5133 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382847 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382857 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382866 5133 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382875 5133 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382883 5133 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382892 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382900 5133 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382909 5133 reconciler_common.go:293] 
"Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382917 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382927 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382936 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382944 5133 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382952 5133 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382960 5133 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382967 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382975 5133 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382983 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382991 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383028 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383043 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383051 5133 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383061 5133 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383070 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383078 5133 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383086 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383097 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383155 5133 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383166 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383175 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383182 5133 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383191 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383200 5133 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383210 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383217 5133 reconciler_common.go:293] "Volume 
detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383252 5133 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383262 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383271 5133 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383280 5133 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383289 5133 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383299 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383309 5133 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383319 5133 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383328 5133 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383338 5133 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383347 5133 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383355 5133 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383364 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" 
(UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.382729 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383372 5133 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383461 5133 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383475 5133 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383486 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383497 5133 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383508 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383518 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383536 5133 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383545 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383554 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383563 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383573 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383584 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383595 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383604 5133 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383614 5133 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383623 5133 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383631 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383640 5133 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383649 5133 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383658 5133 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383666 5133 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383675 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383684 5133 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383693 5133 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383701 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383710 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383719 5133 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383730 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383738 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383748 5133 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383757 5133 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383766 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383774 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383785 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383794 5133 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 
13:42:48.383804 5133 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383812 5133 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383821 5133 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383830 5133 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383839 5133 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383847 5133 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383855 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383864 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383872 5133 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383880 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383888 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383896 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383906 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383914 
5133 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383924 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383932 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383941 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383950 5133 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383958 5133 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383966 5133 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383975 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383983 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.383991 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384017 5133 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384030 5133 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384039 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384047 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384056 5133 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384067 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384076 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384084 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384093 5133 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384103 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384111 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384121 5133 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384131 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384141 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384154 5133 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384163 5133 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384172 5133 reconciler_common.go:293] "Volume detached for 
volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384180 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384190 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384199 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384207 5133 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384215 5133 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384223 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.384475 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.393126 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.403280 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.413653 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.461170 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.461835 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.462886 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.463508 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.464522 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.465076 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.465935 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.466869 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.467500 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.468495 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 21 13:42:48 crc 
kubenswrapper[5133]: I1121 13:42:48.469248 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.470306 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.470825 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.471363 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.472270 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.472792 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.473336 5133 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.473774 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.474173 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.474734 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.475928 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.476370 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.477271 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.477668 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.478606 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.478974 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.479548 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.480567 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.481040 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.481929 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.482539 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.483371 5133 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.483488 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.485226 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.486122 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.486540 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.487955 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.488632 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.489777 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.490989 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.493448 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.494105 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.495485 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.496371 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.497605 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.498093 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.498647 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.499225 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.500157 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.500767 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.501380 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.501980 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.502649 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" 
path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.503434 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.504118 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.529050 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.550347 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 21 13:42:48 crc kubenswrapper[5133]: W1121 13:42:48.551780 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-c9b19b29e4db350ca027afdf444b4392776f13a21abc65860a02921f2f3aa75f WatchSource:0}: Error finding container c9b19b29e4db350ca027afdf444b4392776f13a21abc65860a02921f2f3aa75f: Status 404 returned error can't find the container with id c9b19b29e4db350ca027afdf444b4392776f13a21abc65860a02921f2f3aa75f Nov 21 13:42:48 crc kubenswrapper[5133]: W1121 13:42:48.562034 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-bf73badacab7df3bd90dbca2fce319b3ccd7e7ec58f2e6e92576353c2c451347 WatchSource:0}: Error finding container bf73badacab7df3bd90dbca2fce319b3ccd7e7ec58f2e6e92576353c2c451347: Status 404 returned error can't find the container with id bf73badacab7df3bd90dbca2fce319b3ccd7e7ec58f2e6e92576353c2c451347 Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.601211 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 21 13:42:48 crc kubenswrapper[5133]: W1121 13:42:48.617568 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-136726ee69c739bfac856637a29c9fb54db4a36fa8fb757ae9e5d8ebe4320796 WatchSource:0}: Error finding container 136726ee69c739bfac856637a29c9fb54db4a36fa8fb757ae9e5d8ebe4320796: Status 404 returned error can't find the container with id 136726ee69c739bfac856637a29c9fb54db4a36fa8fb757ae9e5d8ebe4320796 Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.628488 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"136726ee69c739bfac856637a29c9fb54db4a36fa8fb757ae9e5d8ebe4320796"} Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.630982 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"bf73badacab7df3bd90dbca2fce319b3ccd7e7ec58f2e6e92576353c2c451347"} Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.632214 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c9b19b29e4db350ca027afdf444b4392776f13a21abc65860a02921f2f3aa75f"} Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.634171 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.634719 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.637316 5133 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73" exitCode=255 Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.637395 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73"} Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.637439 5133 scope.go:117] "RemoveContainer" containerID="f53777cc5019e8fd3118cc1369a41c30ac4f1ad998d703999d0ece15a3f65fd9" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.652230 5133 scope.go:117] "RemoveContainer" containerID="5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.652470 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 
13:42:48.656472 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.659467 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.671013 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.689975 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.709258 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.720535 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.731473 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.787202 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.787411 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:42:49.787390448 +0000 UTC m=+29.585222686 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.888705 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.888758 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.888798 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:48 crc kubenswrapper[5133]: I1121 13:42:48.888825 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.888914 5133 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.888916 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.888947 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.888961 5133 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.888971 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert 
podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:49.888954275 +0000 UTC m=+29.686786533 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.889026 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:49.888987345 +0000 UTC m=+29.686819603 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.889088 5133 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.889094 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.889163 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.889179 5133 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.889136 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:49.889120689 +0000 UTC m=+29.686952947 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:48 crc kubenswrapper[5133]: E1121 13:42:48.889257 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:49.889237792 +0000 UTC m=+29.687070060 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.457367 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.457427 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.457579 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.457687 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.644374 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39"} Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.647794 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88"} Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.647843 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a"} Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.651243 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.654957 5133 scope.go:117] "RemoveContainer" containerID="5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73" Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.655297 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.671200 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f781
4a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f53777cc5019e8fd3118cc1369a41c30ac4f1ad998d703999d0ece15a3f65fd9\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:32Z\\\",\\\"message\\\":\\\"W1121 13:42:31.345977 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1121 13:42:31.346425 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763732551 cert, and key in /tmp/serving-cert-1479645967/serving-signer.crt, /tmp/serving-cert-1479645967/serving-signer.key\\\\nI1121 13:42:31.566293 1 observer_polling.go:159] Starting file observer\\\\nW1121 13:42:31.570637 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1121 13:42:31.570817 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:31.572585 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1479645967/tls.crt::/tmp/serving-cert-1479645967/tls.key\\\\\\\"\\\\nF1121 13:42:32.020995 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for 
RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"star
tTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.693213 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.710980 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.727272 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.740442 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.753114 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.765967 5133 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.779755 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.797188 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.797415 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:42:51.797386039 +0000 UTC m=+31.595218327 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.798027 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.810929 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.835185 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.851140 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.866147 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.877589 5133 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:49Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.897963 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.898032 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.898061 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 
13:42:49 crc kubenswrapper[5133]: I1121 13:42:49.898094 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898171 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898202 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898213 5133 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898218 5133 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898239 5133 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898284 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:51.898265127 +0000 UTC m=+31.696097375 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898246 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898332 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898351 5133 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898309 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:51.898299988 +0000 UTC m=+31.696132236 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898433 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:51.898407551 +0000 UTC m=+31.696239839 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:49 crc kubenswrapper[5133]: E1121 13:42:49.898455 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:51.898442942 +0000 UTC m=+31.696275230 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.416389 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.424511 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.431030 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.435987 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.451146 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.457887 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:50 crc kubenswrapper[5133]: E1121 13:42:50.458138 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.470651 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.522487 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.543236 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.569185 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.597734 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.621812 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.639459 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.656620 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.679091 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.695805 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.738607 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.767328 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:50 crc kubenswrapper[5133]: I1121 13:42:50.780188 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:50Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.457155 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.457575 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.457237 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.457781 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.661684 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617"} Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.678887 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18f
ac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.700043 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.730190 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.750306 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.771543 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.790915 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.814071 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.834967 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.835228 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:42:55.83517548 +0000 UTC m=+35.633007768 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.840270 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.936444 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.936514 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.936562 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:51 crc kubenswrapper[5133]: I1121 13:42:51.936603 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936716 5133 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936712 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936775 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936795 5133 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936800 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:55.936777838 +0000 UTC m=+35.734610126 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936719 5133 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936878 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:55.93685058 +0000 UTC m=+35.734682868 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936933 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:55.936904571 +0000 UTC m=+35.734736939 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936731 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.936979 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.937042 5133 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:51 crc kubenswrapper[5133]: E1121 13:42:51.937128 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 13:42:55.937105757 +0000 UTC m=+35.734938175 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.456638 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:52 crc kubenswrapper[5133]: E1121 13:42:52.456781 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.460253 5133 csr.go:261] certificate signing request csr-rqt7h is approved, waiting to be issued Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.475663 5133 csr.go:257] certificate signing request csr-rqt7h is issued Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.479060 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1
74f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.492194 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-pvdwc"] Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.492662 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-pvdwc" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.500893 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.500952 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.503417 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.503881 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.516062 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.530796 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.542568 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.556747 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.566630 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.583577 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.595557 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.606094 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.620560 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.630472 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is 
after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.641877 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.643987 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzt65\" (UniqueName: \"kubernetes.io/projected/87822156-53e8-4eb5-b241-db506a21a1b9-kube-api-access-jzt65\") pod \"node-resolver-pvdwc\" (UID: \"87822156-53e8-4eb5-b241-db506a21a1b9\") " pod="openshift-dns/node-resolver-pvdwc" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.644079 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/87822156-53e8-4eb5-b241-db506a21a1b9-hosts-file\") pod \"node-resolver-pvdwc\" (UID: \"87822156-53e8-4eb5-b241-db506a21a1b9\") " pod="openshift-dns/node-resolver-pvdwc" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.653244 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.665661 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.679989 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.701670 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.745240 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/87822156-53e8-4eb5-b241-db506a21a1b9-hosts-file\") pod \"node-resolver-pvdwc\" (UID: \"87822156-53e8-4eb5-b241-db506a21a1b9\") " pod="openshift-dns/node-resolver-pvdwc" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.745347 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzt65\" (UniqueName: \"kubernetes.io/projected/87822156-53e8-4eb5-b241-db506a21a1b9-kube-api-access-jzt65\") pod \"node-resolver-pvdwc\" (UID: \"87822156-53e8-4eb5-b241-db506a21a1b9\") " pod="openshift-dns/node-resolver-pvdwc" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.745559 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/87822156-53e8-4eb5-b241-db506a21a1b9-hosts-file\") pod \"node-resolver-pvdwc\" (UID: \"87822156-53e8-4eb5-b241-db506a21a1b9\") " pod="openshift-dns/node-resolver-pvdwc" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.763534 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzt65\" (UniqueName: \"kubernetes.io/projected/87822156-53e8-4eb5-b241-db506a21a1b9-kube-api-access-jzt65\") pod \"node-resolver-pvdwc\" (UID: \"87822156-53e8-4eb5-b241-db506a21a1b9\") " pod="openshift-dns/node-resolver-pvdwc" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.803610 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-pvdwc" Nov 21 13:42:52 crc kubenswrapper[5133]: W1121 13:42:52.814556 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87822156_53e8_4eb5_b241_db506a21a1b9.slice/crio-4faf62198dec7c71a7587dfc42f10d11184d7d830ff489f2c8457b0fecc4a492 WatchSource:0}: Error finding container 4faf62198dec7c71a7587dfc42f10d11184d7d830ff489f2c8457b0fecc4a492: Status 404 returned error can't find the container with id 4faf62198dec7c71a7587dfc42f10d11184d7d830ff489f2c8457b0fecc4a492 Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.939106 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-xxlvp"] Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.939466 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-m5d24"] Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.939686 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-m5d24" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.939819 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.942709 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.943416 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.943436 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.943480 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.943431 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.943697 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.945234 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.945339 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.945912 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.946566 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.959032 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.977109 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:52 crc kubenswrapper[5133]: I1121 13:42:52.992503 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.004539 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.016759 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.029400 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.045830 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.048641 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-var-lib-kubelet\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.048774 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-hostroot\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.048861 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-multus-cni-dir\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.048941 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-cnibin\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049036 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-run-k8s-cni-cncf-io\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 
13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049106 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-etc-kubernetes\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049188 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/52f5a729-05d1-4f84-a216-1df3233af57d-proxy-tls\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049265 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/52f5a729-05d1-4f84-a216-1df3233af57d-mcd-auth-proxy-config\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049362 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-var-lib-cni-bin\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049437 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-var-lib-cni-multus\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049514 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0077329a-abad-4c6d-a601-2dc01fd83184-multus-daemon-config\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049590 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-run-multus-certs\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049676 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/52f5a729-05d1-4f84-a216-1df3233af57d-rootfs\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049753 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-multus-conf-dir\") pod \"multus-m5d24\" (UID: 
\"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049877 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-run-netns\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.049954 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-os-release\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.050048 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0077329a-abad-4c6d-a601-2dc01fd83184-cni-binary-copy\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.050131 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmd8c\" (UniqueName: \"kubernetes.io/projected/0077329a-abad-4c6d-a601-2dc01fd83184-kube-api-access-lmd8c\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.050212 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-multus-socket-dir-parent\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.050283 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gnvx\" (UniqueName: \"kubernetes.io/projected/52f5a729-05d1-4f84-a216-1df3233af57d-kube-api-access-4gnvx\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.050366 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-system-cni-dir\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.068302 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.086868 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.105513 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd7
91fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.123591 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.140286 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151161 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-run-multus-certs\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151217 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/52f5a729-05d1-4f84-a216-1df3233af57d-rootfs\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151247 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-run-netns\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151276 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-multus-conf-dir\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151324 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-os-release\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151346 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0077329a-abad-4c6d-a601-2dc01fd83184-cni-binary-copy\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151369 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmd8c\" (UniqueName: \"kubernetes.io/projected/0077329a-abad-4c6d-a601-2dc01fd83184-kube-api-access-lmd8c\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 
13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151392 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-system-cni-dir\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151415 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-multus-socket-dir-parent\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151455 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gnvx\" (UniqueName: \"kubernetes.io/projected/52f5a729-05d1-4f84-a216-1df3233af57d-kube-api-access-4gnvx\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151495 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-multus-cni-dir\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151520 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-var-lib-kubelet\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151541 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-hostroot\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151566 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-cnibin\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151587 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-run-k8s-cni-cncf-io\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151610 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-etc-kubernetes\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151633 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/52f5a729-05d1-4f84-a216-1df3233af57d-proxy-tls\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151665 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-var-lib-cni-bin\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151687 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-var-lib-cni-multus\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151711 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0077329a-abad-4c6d-a601-2dc01fd83184-multus-daemon-config\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.151741 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/52f5a729-05d1-4f84-a216-1df3233af57d-mcd-auth-proxy-config\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152063 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-run-multus-certs\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152118 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/52f5a729-05d1-4f84-a216-1df3233af57d-rootfs\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152146 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-run-netns\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152177 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-multus-conf-dir\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152368 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-os-release\") pod \"multus-m5d24\" (UID: 
\"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152691 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-run-k8s-cni-cncf-io\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152693 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-hostroot\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152840 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-cnibin\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152891 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-etc-kubernetes\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152910 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-var-lib-cni-bin\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.152853 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-var-lib-cni-multus\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.153047 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-multus-socket-dir-parent\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.153083 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-host-var-lib-kubelet\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.153082 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0077329a-abad-4c6d-a601-2dc01fd83184-cni-binary-copy\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.153096 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-system-cni-dir\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.153125 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0077329a-abad-4c6d-a601-2dc01fd83184-multus-cni-dir\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.153672 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/52f5a729-05d1-4f84-a216-1df3233af57d-mcd-auth-proxy-config\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.153842 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0077329a-abad-4c6d-a601-2dc01fd83184-multus-daemon-config\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.156965 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/52f5a729-05d1-4f84-a216-1df3233af57d-proxy-tls\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.160481 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.173290 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is 
after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.180806 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gnvx\" (UniqueName: \"kubernetes.io/projected/52f5a729-05d1-4f84-a216-1df3233af57d-kube-api-access-4gnvx\") pod \"machine-config-daemon-xxlvp\" (UID: \"52f5a729-05d1-4f84-a216-1df3233af57d\") " pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.193105 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmd8c\" (UniqueName: \"kubernetes.io/projected/0077329a-abad-4c6d-a601-2dc01fd83184-kube-api-access-lmd8c\") pod \"multus-m5d24\" (UID: \"0077329a-abad-4c6d-a601-2dc01fd83184\") " pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.196180 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.213669 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.225872 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.240258 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.258803 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.265055 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-m5d24" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.274626 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: W1121 13:42:53.277034 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0077329a_abad_4c6d_a601_2dc01fd83184.slice/crio-dbf857e4f7243e8b7bc61e20d3f436d0d629b4dbf9259f2c07fbb4d6d05edfec WatchSource:0}: Error finding container dbf857e4f7243e8b7bc61e20d3f436d0d629b4dbf9259f2c07fbb4d6d05edfec: Status 404 returned error can't find the container with id dbf857e4f7243e8b7bc61e20d3f436d0d629b4dbf9259f2c07fbb4d6d05edfec Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.300385 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.309724 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.315617 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: W1121 13:42:53.322870 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52f5a729_05d1_4f84_a216_1df3233af57d.slice/crio-424fdb9aa97aeeca4315d77b9336434d62b2dfcef6b4a4ae7b51e2752a316ba9 WatchSource:0}: Error finding container 424fdb9aa97aeeca4315d77b9336434d62b2dfcef6b4a4ae7b51e2752a316ba9: Status 404 returned error can't find the container with id 424fdb9aa97aeeca4315d77b9336434d62b2dfcef6b4a4ae7b51e2752a316ba9 Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.338130 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tjzm8"] Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.338797 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.341055 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.341932 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.341958 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.342020 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.342159 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.349399 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.352027 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-b9v8b"] Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.362676 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.362947 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.367162 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.367426 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.376729 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.416420 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.442981 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is 
after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.457104 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.457146 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:53 crc kubenswrapper[5133]: E1121 13:42:53.457247 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:42:53 crc kubenswrapper[5133]: E1121 13:42:53.457390 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.464266 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4
\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.467982 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-openvswitch\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468031 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-cni-binary-copy\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468054 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-kubelet\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468083 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-log-socket\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468109 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-ovn\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468125 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468224 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-system-cni-dir\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468287 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-bin\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468305 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-script-lib\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468325 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-config\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 
13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468346 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-os-release\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468370 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-systemd-units\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468449 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-tuning-conf-dir\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468474 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468495 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqxhl\" (UniqueName: \"kubernetes.io/projected/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-kube-api-access-wqxhl\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468527 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-env-overrides\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468543 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-cnibin\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468559 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lr2l4\" (UniqueName: \"kubernetes.io/projected/373d5da7-fae9-4689-9ede-6e2d69a54c02-kube-api-access-lr2l4\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468576 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-node-log\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468591 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-ovn-kubernetes\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468608 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-slash\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468625 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-var-lib-openvswitch\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468640 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-netd\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468656 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovn-node-metrics-cert\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468702 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-systemd\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468726 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-etc-openvswitch\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.468744 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-netns\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.476658 5133 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.476838 5133 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-11-21 13:37:52 +0000 UTC, rotation deadline is 2026-08-24 00:43:08.875839845 +0000 UTC Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.476948 5133 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6611h0m15.398894892s for next certificate rotation Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.488955 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.505901 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.525306 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\
\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.541528 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.554071 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.567471 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570144 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-ovn-kubernetes\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570179 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-slash\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570196 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-node-log\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570212 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-netd\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570227 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovn-node-metrics-cert\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570251 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-var-lib-openvswitch\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570265 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-systemd\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570279 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-netns\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570294 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-etc-openvswitch\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570312 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-openvswitch\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570345 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-cni-binary-copy\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570365 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-kubelet\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570381 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"log-socket\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-log-socket\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570395 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-ovn\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570410 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570431 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-system-cni-dir\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570444 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-bin\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570458 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-script-lib\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570474 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-os-release\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570489 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-systemd-units\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570514 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-config\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570528 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" 
(UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-tuning-conf-dir\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570543 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570557 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqxhl\" (UniqueName: \"kubernetes.io/projected/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-kube-api-access-wqxhl\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570590 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-env-overrides\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570604 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-cnibin\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570618 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lr2l4\" (UniqueName: \"kubernetes.io/projected/373d5da7-fae9-4689-9ede-6e2d69a54c02-kube-api-access-lr2l4\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570870 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-ovn-kubernetes\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570921 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-slash\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570941 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-node-log\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.570960 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" 
(UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-netd\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571436 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-var-lib-openvswitch\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571509 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-systemd\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571538 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-netns\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571625 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-systemd-units\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571707 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-tuning-conf-dir\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571755 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-cnibin\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571785 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-kubelet\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571725 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-os-release\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571796 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-ovn\") pod \"ovnkube-node-tjzm8\" (UID: 
\"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571824 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-system-cni-dir\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571797 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-etc-openvswitch\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571798 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571827 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-openvswitch\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571847 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-log-socket\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.571641 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-bin\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.572190 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-env-overrides\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.572267 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-script-lib\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.572506 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-cni-binary-copy\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " 
pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.572648 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-config\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.572846 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.575455 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovn-node-metrics-cert\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.594755 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc 
kubenswrapper[5133]: I1121 13:42:53.600736 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqxhl\" (UniqueName: \"kubernetes.io/projected/c0ec3a98-4d89-4f36-a79e-ac65da8672ca-kube-api-access-wqxhl\") pod \"multus-additional-cni-plugins-b9v8b\" (UID: \"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\") " pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.606394 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lr2l4\" (UniqueName: \"kubernetes.io/projected/373d5da7-fae9-4689-9ede-6e2d69a54c02-kube-api-access-lr2l4\") pod \"ovnkube-node-tjzm8\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.619247 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.632311 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.643899 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.652901 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.661127 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.668326 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112"} Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.668374 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745"} Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.668389 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"424fdb9aa97aeeca4315d77b9336434d62b2dfcef6b4a4ae7b51e2752a316ba9"} Nov 21 13:42:53 crc kubenswrapper[5133]: W1121 13:42:53.669085 5133 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod373d5da7_fae9_4689_9ede_6e2d69a54c02.slice/crio-05132343d4c1747126533aa575d4729775e3869e00592199528d1b445ef158ae WatchSource:0}: Error finding container 05132343d4c1747126533aa575d4729775e3869e00592199528d1b445ef158ae: Status 404 returned error can't find the container with id 05132343d4c1747126533aa575d4729775e3869e00592199528d1b445ef158ae Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.669647 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m5d24" event={"ID":"0077329a-abad-4c6d-a601-2dc01fd83184","Type":"ContainerStarted","Data":"24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661"} Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.669684 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m5d24" event={"ID":"0077329a-abad-4c6d-a601-2dc01fd83184","Type":"ContainerStarted","Data":"dbf857e4f7243e8b7bc61e20d3f436d0d629b4dbf9259f2c07fbb4d6d05edfec"} Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.670997 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pvdwc" event={"ID":"87822156-53e8-4eb5-b241-db506a21a1b9","Type":"ContainerStarted","Data":"dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71"} Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.671031 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pvdwc" event={"ID":"87822156-53e8-4eb5-b241-db506a21a1b9","Type":"ContainerStarted","Data":"4faf62198dec7c71a7587dfc42f10d11184d7d830ff489f2c8457b0fecc4a492"} Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.686595 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.693408 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.700600 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 
2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: W1121 13:42:53.705560 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc0ec3a98_4d89_4f36_a79e_ac65da8672ca.slice/crio-5a2b45200e94164dd403b64b2732c5a5173471731c6914d2f7f5ea2db5c57735 WatchSource:0}: Error finding container 5a2b45200e94164dd403b64b2732c5a5173471731c6914d2f7f5ea2db5c57735: Status 404 returned error can't find the container with id 5a2b45200e94164dd403b64b2732c5a5173471731c6914d2f7f5ea2db5c57735 Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.723984 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.738249 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.753522 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.776200 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.792818 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is 
after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.806298 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"
/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.819869 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.820040 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.820585 5133 scope.go:117] "RemoveContainer" containerID="5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73" Nov 21 13:42:53 crc kubenswrapper[5133]: E1121 13:42:53.820729 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.834067 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.848557 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.860135 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.871967 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.886501 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.897945 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.913914 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.930143 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.944823 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:53 crc kubenswrapper[5133]: I1121 13:42:53.969688 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.011420 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.046961 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.088258 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.457413 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:54 crc kubenswrapper[5133]: E1121 13:42:54.457577 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.549829 5133 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.551425 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.551455 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.551464 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.551576 5133 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.556563 5133 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.556794 5133 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.557805 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.557857 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.557872 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.557895 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.557914 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:54Z","lastTransitionTime":"2025-11-21T13:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:54 crc kubenswrapper[5133]: E1121 13:42:54.649574 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.652595 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.652616 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.652623 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.652635 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.652644 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:54Z","lastTransitionTime":"2025-11-21T13:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:54 crc kubenswrapper[5133]: E1121 13:42:54.662888 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.665526 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.665561 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.665569 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.665582 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.665592 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:54Z","lastTransitionTime":"2025-11-21T13:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.674590 5133 generic.go:334] "Generic (PLEG): container finished" podID="c0ec3a98-4d89-4f36-a79e-ac65da8672ca" containerID="79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848" exitCode=0 Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.674668 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerDied","Data":"79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848"} Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.674731 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerStarted","Data":"5a2b45200e94164dd403b64b2732c5a5173471731c6914d2f7f5ea2db5c57735"} Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.675800 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b" exitCode=0 Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.675833 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b"} Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.675882 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"05132343d4c1747126533aa575d4729775e3869e00592199528d1b445ef158ae"} Nov 21 13:42:54 crc kubenswrapper[5133]: E1121 13:42:54.681314 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.688068 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.688108 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.688120 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.688135 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.688147 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:54Z","lastTransitionTime":"2025-11-21T13:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:54 crc kubenswrapper[5133]: E1121 13:42:54.705098 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.706295 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.708866 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.708894 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.708907 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.708922 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.708935 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:54Z","lastTransitionTime":"2025-11-21T13:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.720973 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: E1121 13:42:54.721123 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: E1121 13:42:54.721263 5133 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.723341 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.723375 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.723384 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.723398 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.723409 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:54Z","lastTransitionTime":"2025-11-21T13:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.734168 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.747505 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.759239 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.770906 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.783880 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.795463 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.808764 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.825457 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.826114 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.826164 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.826180 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.826200 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.826211 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:54Z","lastTransitionTime":"2025-11-21T13:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.835352 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.848749 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.859759 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.870611 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.879440 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.890685 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.902029 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.911589 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.928873 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.928917 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.928928 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.928945 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.928957 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:54Z","lastTransitionTime":"2025-11-21T13:42:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.933235 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z 
is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.947345 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:54 crc kubenswrapper[5133]: I1121 13:42:54.966850 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:54Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.017346 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.031335 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.031357 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.031364 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.031376 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.031385 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.046245 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.089183 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.131458 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.134378 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.134422 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.134442 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.134466 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.134483 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.170275 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.237586 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.237631 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.237647 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.237665 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.237677 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.339095 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.339122 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.339131 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.339143 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.339153 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.441677 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.442313 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.442437 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.442606 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.442784 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.457039 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:55 crc kubenswrapper[5133]: E1121 13:42:55.457378 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.457664 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:55 crc kubenswrapper[5133]: E1121 13:42:55.458161 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.545497 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.545552 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.545565 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.545583 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.545595 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.605469 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-bj52j"] Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.606310 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.608139 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.608291 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.608435 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.609150 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.622824 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.634553 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.648906 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.648956 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.648970 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.648989 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.649021 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.652372 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z 
is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.672326 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.686941 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerStarted","Data":"775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.691801 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.691848 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.691866 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.691882 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.691895 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.693540 5133 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.695955 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdp6q\" (UniqueName: \"kubernetes.io/projected/f9cc533c-2914-45d2-97b4-d6e35361450d-kube-api-access-zdp6q\") pod \"node-ca-bj52j\" (UID: \"f9cc533c-2914-45d2-97b4-d6e35361450d\") " pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.696449 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f9cc533c-2914-45d2-97b4-d6e35361450d-serviceca\") pod \"node-ca-bj52j\" (UID: \"f9cc533c-2914-45d2-97b4-d6e35361450d\") " pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.696525 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: 
\"kubernetes.io/host-path/f9cc533c-2914-45d2-97b4-d6e35361450d-host\") pod \"node-ca-bj52j\" (UID: \"f9cc533c-2914-45d2-97b4-d6e35361450d\") " pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.711855 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.732318 5133 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.746451 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.750956 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.751022 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.751032 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.751048 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.751057 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.763913 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.775470 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.787747 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.797924 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9cc533c-2914-45d2-97b4-d6e35361450d-host\") pod \"node-ca-bj52j\" (UID: \"f9cc533c-2914-45d2-97b4-d6e35361450d\") " pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.797964 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdp6q\" (UniqueName: \"kubernetes.io/projected/f9cc533c-2914-45d2-97b4-d6e35361450d-kube-api-access-zdp6q\") pod \"node-ca-bj52j\" (UID: \"f9cc533c-2914-45d2-97b4-d6e35361450d\") " pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.797993 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f9cc533c-2914-45d2-97b4-d6e35361450d-serviceca\") pod \"node-ca-bj52j\" (UID: \"f9cc533c-2914-45d2-97b4-d6e35361450d\") " pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.798036 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f9cc533c-2914-45d2-97b4-d6e35361450d-host\") pod \"node-ca-bj52j\" (UID: \"f9cc533c-2914-45d2-97b4-d6e35361450d\") " pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.798884 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f9cc533c-2914-45d2-97b4-d6e35361450d-serviceca\") pod \"node-ca-bj52j\" (UID: \"f9cc533c-2914-45d2-97b4-d6e35361450d\") " pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.801197 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.815823 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.816593 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdp6q\" (UniqueName: \"kubernetes.io/projected/f9cc533c-2914-45d2-97b4-d6e35361450d-kube-api-access-zdp6q\") pod \"node-ca-bj52j\" (UID: \"f9cc533c-2914-45d2-97b4-d6e35361450d\") " pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.827186 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.853241 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.853458 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.853555 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.853638 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.853714 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.866815 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.898347 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:42:55 crc kubenswrapper[5133]: E1121 13:42:55.898555 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:43:03.898540617 +0000 UTC m=+43.696372865 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.909308 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.920573 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-bj52j" Nov 21 13:42:55 crc kubenswrapper[5133]: W1121 13:42:55.936943 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9cc533c_2914_45d2_97b4_d6e35361450d.slice/crio-2619d27c2b2b5660232a68267ec1c5712073d1e78ec3ed4570f5d3691b8b1789 WatchSource:0}: Error finding container 2619d27c2b2b5660232a68267ec1c5712073d1e78ec3ed4570f5d3691b8b1789: Status 404 returned error can't find the container with id 2619d27c2b2b5660232a68267ec1c5712073d1e78ec3ed4570f5d3691b8b1789 Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.947749 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.956507 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.956534 5133 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.956545 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.956560 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.956589 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:55Z","lastTransitionTime":"2025-11-21T13:42:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.985790 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:55Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.999538 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.999607 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.999652 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:55 crc kubenswrapper[5133]: I1121 13:42:55.999691 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:55 crc kubenswrapper[5133]: E1121 13:42:55.999759 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:55 crc kubenswrapper[5133]: E1121 13:42:55.999791 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:55 crc kubenswrapper[5133]: E1121 13:42:55.999807 5133 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:55 crc kubenswrapper[5133]: E1121 13:42:55.999818 5133 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:55 crc kubenswrapper[5133]: E1121 13:42:55.999839 5133 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:55 crc kubenswrapper[5133]: E1121 13:42:55.999872 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:03.999850747 +0000 UTC m=+43.797683005 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:56 crc kubenswrapper[5133]: E1121 13:42:56.000050 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:04.000035672 +0000 UTC m=+43.797867940 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:42:56 crc kubenswrapper[5133]: E1121 13:42:56.000069 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:04.000060683 +0000 UTC m=+43.797892961 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:42:56 crc kubenswrapper[5133]: E1121 13:42:55.999955 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:42:56 crc kubenswrapper[5133]: E1121 13:42:56.000090 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:42:56 crc kubenswrapper[5133]: E1121 13:42:56.000101 5133 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:56 crc kubenswrapper[5133]: E1121 13:42:56.000142 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:04.000129314 +0000 UTC m=+43.797961572 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.034882 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z 
is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.062408 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.062446 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.062456 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.062472 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.062482 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:56Z","lastTransitionTime":"2025-11-21T13:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.077244 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.107406 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha2
56:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.145821 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.164736 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.164773 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.164788 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.164805 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.164817 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:56Z","lastTransitionTime":"2025-11-21T13:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.185347 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.225725 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.266951 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:56 
crc kubenswrapper[5133]: I1121 13:42:56.267013 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.267025 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.267044 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.267057 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:56Z","lastTransitionTime":"2025-11-21T13:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.268241 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\"
,\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.305951 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.346252 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.368854 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.368897 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.368907 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.368922 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.368934 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:56Z","lastTransitionTime":"2025-11-21T13:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.386078 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.456872 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:56 crc kubenswrapper[5133]: E1121 13:42:56.457070 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.471351 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.471386 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.471395 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.471411 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.471422 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:56Z","lastTransitionTime":"2025-11-21T13:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.574952 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.575027 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.575040 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.575060 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.575071 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:56Z","lastTransitionTime":"2025-11-21T13:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.677793 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.677864 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.677881 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.677907 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.677928 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:56Z","lastTransitionTime":"2025-11-21T13:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.699652 5133 generic.go:334] "Generic (PLEG): container finished" podID="c0ec3a98-4d89-4f36-a79e-ac65da8672ca" containerID="775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50" exitCode=0 Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.699728 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerDied","Data":"775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.701622 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-bj52j" event={"ID":"f9cc533c-2914-45d2-97b4-d6e35361450d","Type":"ContainerStarted","Data":"b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.701687 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-bj52j" event={"ID":"f9cc533c-2914-45d2-97b4-d6e35361450d","Type":"ContainerStarted","Data":"2619d27c2b2b5660232a68267ec1c5712073d1e78ec3ed4570f5d3691b8b1789"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.724418 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.748299 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.779724 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.779762 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.779772 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.779788 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.779798 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:56Z","lastTransitionTime":"2025-11-21T13:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.783664 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.800670 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.811681 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.822841 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.833226 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.846753 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.855836 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.874243 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z 
is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.883465 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.883879 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.883896 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.883916 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.883930 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:56Z","lastTransitionTime":"2025-11-21T13:42:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.888476 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:56 crc kubenswrapper[5133]: I1121 13:42:56.902240 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.025930 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.028109 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.028144 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.028156 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.028180 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.028193 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.041254 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.054027 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.072696 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.082724 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.092732 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.110031 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.129981 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.130034 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.130046 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.130061 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.130072 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.146225 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\
\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.187093 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"ho
stIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.231703 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.231750 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.231764 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.231784 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.231817 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.235394 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z 
is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.272121 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\
"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.308941 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.333983 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.334036 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.334047 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.334065 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.334079 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.351281 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.392297 5133 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.426922 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.436647 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.436683 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.436695 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.436711 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.436722 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.456575 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:57 crc kubenswrapper[5133]: E1121 13:42:57.456679 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.456817 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:57 crc kubenswrapper[5133]: E1121 13:42:57.457081 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.469046 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.
io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.513047 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.539388 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.539419 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.539427 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.539441 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.539451 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.643463 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.643521 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.643534 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.643552 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.643565 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.731881 5133 generic.go:334] "Generic (PLEG): container finished" podID="c0ec3a98-4d89-4f36-a79e-ac65da8672ca" containerID="6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02" exitCode=0 Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.732019 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerDied","Data":"6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.746358 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.746416 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.746452 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.746482 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.746505 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.751534 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.768689 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.787499 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.809949 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.848727 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.848766 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.848777 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.848795 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.848807 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.861095 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.882386 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.906375 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.925214 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.939062 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.950876 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.950901 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.950910 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.950924 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.950933 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:57Z","lastTransitionTime":"2025-11-21T13:42:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.952796 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.965149 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:57 crc kubenswrapper[5133]: I1121 13:42:57.988340 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:57Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.027889 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.053082 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.053138 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.053150 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.053169 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.053182 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.068054 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.156775 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.156833 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.156846 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.156863 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.156874 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.259343 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.259411 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.259429 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.259455 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.259474 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.361907 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.361960 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.361978 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.362032 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.362051 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.456778 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:42:58 crc kubenswrapper[5133]: E1121 13:42:58.456926 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.463832 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.463861 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.463872 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.463888 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.463899 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.568163 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.568233 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.568247 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.568266 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.568284 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.672518 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.672581 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.672603 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.672631 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.672652 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.741461 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.748567 5133 generic.go:334] "Generic (PLEG): container finished" podID="c0ec3a98-4d89-4f36-a79e-ac65da8672ca" containerID="7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53" exitCode=0 Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.748688 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerDied","Data":"7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.776154 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.776409 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.776434 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.776519 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.776539 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.777709 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.797816 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.814660 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.843278 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.864347 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.879931 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.879974 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.880046 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.880073 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.880148 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.885458 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.898724 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.911663 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.925180 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.949088 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z 
is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.964096 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.975507 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.986026 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.991022 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.991062 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.991074 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.991090 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.991104 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:58Z","lastTransitionTime":"2025-11-21T13:42:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:58 crc kubenswrapper[5133]: I1121 13:42:58.995106 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:58Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.093222 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.093266 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.093278 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.093293 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.093305 5133 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:59Z","lastTransitionTime":"2025-11-21T13:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.196931 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.196988 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.197048 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.197086 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.197104 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:59Z","lastTransitionTime":"2025-11-21T13:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.300267 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.300337 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.300362 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.300390 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.300412 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:59Z","lastTransitionTime":"2025-11-21T13:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.403230 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.403270 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.403281 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.403298 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.403310 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:59Z","lastTransitionTime":"2025-11-21T13:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.456915 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.456965 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:42:59 crc kubenswrapper[5133]: E1121 13:42:59.457065 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:42:59 crc kubenswrapper[5133]: E1121 13:42:59.457163 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.506652 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.506691 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.506702 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.506717 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.506729 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:59Z","lastTransitionTime":"2025-11-21T13:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.610885 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.610943 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.610963 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.610988 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.611041 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:59Z","lastTransitionTime":"2025-11-21T13:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.713697 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.713735 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.713746 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.713760 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.713772 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:59Z","lastTransitionTime":"2025-11-21T13:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.759254 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerStarted","Data":"b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.775734 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.795835 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.810104 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.816247 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.816292 5133 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.816302 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.816320 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.816331 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:59Z","lastTransitionTime":"2025-11-21T13:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.822644 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.839258 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\
"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.858203 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.876072 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.892591 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.913664 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.919794 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.919859 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.919878 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.919902 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.919920 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:42:59Z","lastTransitionTime":"2025-11-21T13:42:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.933908 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.967436 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z 
is after 2025-08-24T17:21:41Z" Nov 21 13:42:59 crc kubenswrapper[5133]: I1121 13:42:59.993250 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:42:59Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.019041 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.024079 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.024167 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.024195 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.024228 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.024251 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.040573 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.128287 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.128348 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.128366 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.128392 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.128409 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.232116 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.232609 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.232631 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.232656 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.232673 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.335542 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.335605 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.335622 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.335650 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.335667 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.439260 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.439323 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.439340 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.439365 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.439382 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.456758 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:00 crc kubenswrapper[5133]: E1121 13:43:00.456981 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.543293 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.543365 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.543396 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.543429 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.543453 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.646795 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.646859 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.646876 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.646900 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.646917 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.750261 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.750324 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.750340 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.750363 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.750379 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.769094 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.769476 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.792134 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.811659 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.829684 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.831173 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.853943 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.854030 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.854055 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.854084 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.854103 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.857076 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc358257
71aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.885908 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.900168 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.920174 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.937907 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.957453 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.957675 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.957800 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.957915 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.958058 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:00Z","lastTransitionTime":"2025-11-21T13:43:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.972322 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33b
f17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:00 crc kubenswrapper[5133]: I1121 13:43:00.994880 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:00Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.016948 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.036561 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.052903 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.061522 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.061587 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.061606 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.061630 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.061648 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.071874 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.089476 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.110580 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.131634 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.143522 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.156532 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.164140 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.164244 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.164283 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.164308 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.164341 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.170819 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.189608 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.205892 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.226989 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.241086 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.267167 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.267227 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.267247 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.267271 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.267289 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.273926 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33b
f17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.323217 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.359178 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.368945 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.368975 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.368984 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.369027 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.369037 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.374778 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.457154 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.457182 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:01 crc kubenswrapper[5133]: E1121 13:43:01.457306 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:01 crc kubenswrapper[5133]: E1121 13:43:01.457401 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.471469 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.471555 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.471570 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.471585 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.471597 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.574901 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.574969 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.574989 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.575039 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.575058 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.634313 5133 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.678488 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.678546 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.678559 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.678576 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.678588 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.776094 5133 generic.go:334] "Generic (PLEG): container finished" podID="c0ec3a98-4d89-4f36-a79e-ac65da8672ca" containerID="b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578" exitCode=0 Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.777129 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerDied","Data":"b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.777200 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.777902 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.781703 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.782046 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.782089 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.782117 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.782142 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.804846 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.827496 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.844268 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.858459 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.861802 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.884924 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.885034 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.885060 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.885092 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.885113 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.886696 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.904664 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.931400 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33b
f17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.950606 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.970594 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.990376 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.990676 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.990705 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.990777 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.990798 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:01Z","lastTransitionTime":"2025-11-21T13:43:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:01 crc kubenswrapper[5133]: I1121 13:43:01.990951 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:01Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.010258 5133 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.026111 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.040562 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.050072 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.066274 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.082213 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.093520 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.093739 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.093838 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.093864 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.093876 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:02Z","lastTransitionTime":"2025-11-21T13:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.101989 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.114974 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.135540 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.151971 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.178053 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\
\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 
13:43:02.196821 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.196856 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.196866 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.196882 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.196895 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:02Z","lastTransitionTime":"2025-11-21T13:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.204050 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.222432 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.239484 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.252698 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.269189 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.283404 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.297052 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.299078 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.299123 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.299135 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.299153 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.299166 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:02Z","lastTransitionTime":"2025-11-21T13:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.401851 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.401889 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.401900 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.401915 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.401927 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:02Z","lastTransitionTime":"2025-11-21T13:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.457793 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:02 crc kubenswrapper[5133]: E1121 13:43:02.457993 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.479744 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.492985 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.505134 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.505183 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.505198 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.505218 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.505233 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:02Z","lastTransitionTime":"2025-11-21T13:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.511236 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"start
Time\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.530871 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faa
f92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.552579 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.573553 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.593607 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.608320 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.608389 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.608414 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.608444 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.608469 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:02Z","lastTransitionTime":"2025-11-21T13:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.612591 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.628711 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.663246 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33b
f17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.687424 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.709736 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.711753 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.711813 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.711840 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.711869 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.711891 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:02Z","lastTransitionTime":"2025-11-21T13:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.731273 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.751203 5133 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.815730 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.815838 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.815858 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.815882 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.815932 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:02Z","lastTransitionTime":"2025-11-21T13:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.919487 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.919550 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.919561 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.919598 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:02 crc kubenswrapper[5133]: I1121 13:43:02.919612 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:02Z","lastTransitionTime":"2025-11-21T13:43:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.023168 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.023278 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.023299 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.023325 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.023344 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.126348 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.126408 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.126425 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.126449 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.126466 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.230642 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.230697 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.230715 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.230740 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.230761 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.333866 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.333953 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.333980 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.334047 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.334073 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.437393 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.437456 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.437474 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.437498 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.437515 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.457100 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.457286 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:03 crc kubenswrapper[5133]: E1121 13:43:03.457507 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:03 crc kubenswrapper[5133]: E1121 13:43:03.457639 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.541559 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.541871 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.542054 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.542283 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.542429 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.645319 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.645379 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.645398 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.645427 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.645444 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.748662 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.748724 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.748742 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.748765 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.748780 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.788088 5133 generic.go:334] "Generic (PLEG): container finished" podID="c0ec3a98-4d89-4f36-a79e-ac65da8672ca" containerID="1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421" exitCode=0 Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.788184 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerDied","Data":"1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.809355 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a57
8bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.827234 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.840891 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.851575 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.851609 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.851620 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.851637 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.851650 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.858408 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.882361 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"r
eadOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"
containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-
openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.898893 5133 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2da
ed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\
",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.919109 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.933929 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.947857 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.954426 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.954469 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.954480 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.954497 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.954509 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:03Z","lastTransitionTime":"2025-11-21T13:43:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.960878 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.971164 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.985791 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.992290 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:43:03 crc kubenswrapper[5133]: E1121 13:43:03.992507 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:43:19.992482684 +0000 UTC m=+59.790314932 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:43:03 crc kubenswrapper[5133]: I1121 13:43:03.998748 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:03Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.008293 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.056728 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.056764 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.056773 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.056785 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.056795 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.093290 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.093336 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.093356 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.093377 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093452 5133 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093467 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093492 5133 projected.go:288] Couldn't get 
configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093502 5133 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093503 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093522 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093532 5133 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093503 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:20.093488615 +0000 UTC m=+59.891320863 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093467 5133 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093596 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:20.093561717 +0000 UTC m=+59.891393965 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093610 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:20.093604678 +0000 UTC m=+59.891436926 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.093657 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:20.093637259 +0000 UTC m=+59.891469597 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.160049 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.160108 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.160123 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.160143 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.160157 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.264256 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.264346 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.264598 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.264635 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.264705 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.369723 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.369789 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.369807 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.369836 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.369855 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.457434 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.457709 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.472347 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.472418 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.472442 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.472469 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.472492 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.575761 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.575828 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.575930 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.575960 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.575978 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.678958 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.679125 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.679146 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.679165 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.679176 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.782634 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.782705 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.782722 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.782746 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.782762 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.812579 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" event={"ID":"c0ec3a98-4d89-4f36-a79e-ac65da8672ca","Type":"ContainerStarted","Data":"933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.831566 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.847672 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.864284 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.879694 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.885200 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.885245 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.885263 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.885290 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.885308 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.896340 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.915116 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.932063 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.939707 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.939749 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.939760 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.939801 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.939814 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.953185 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.960052 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"5
37cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.965438 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.965503 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.965523 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.965548 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.965567 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.976525 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-c
ni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: E1121 13:43:04.983968 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.989126 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.989197 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.989217 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.989241 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.989259 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:04Z","lastTransitionTime":"2025-11-21T13:43:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:04 crc kubenswrapper[5133]: I1121 13:43:04.997260 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.
io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:04Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: E1121 13:43:05.006794 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.011256 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.011336 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.011361 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.011390 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.011414 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.028635 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33b
f17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: E1121 13:43:05.031815 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed2
1\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.036650 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: 
I1121 13:43:05.036686 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.036697 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.036713 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.036725 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.050184 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/et
c/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: E1121 13:43:05.053326 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: E1121 13:43:05.053552 5133 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.055074 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.055093 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.055101 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.055115 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.055125 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.071692 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.088115 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.157195 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.157238 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.157250 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.157290 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.157302 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.260889 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.260956 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.260976 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.261034 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.261055 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.364705 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.364768 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.364782 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.364804 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.365207 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.456861 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:05 crc kubenswrapper[5133]: E1121 13:43:05.457381 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.457418 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:05 crc kubenswrapper[5133]: E1121 13:43:05.457734 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.468016 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.468065 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.468079 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.468099 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.468114 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.571433 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.571510 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.571532 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.571562 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.571585 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.674791 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.674856 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.674879 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.674907 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.674928 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.778503 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.778767 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.778941 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.779129 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.779266 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.845942 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg"] Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.854147 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.857077 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.858372 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.882498 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.882554 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.882566 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.882581 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.882592 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.883459 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.895720 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.908731 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrzcz\" (UniqueName: \"kubernetes.io/projected/2c337e24-9cef-4932-92ae-5a175379c77a-kube-api-access-wrzcz\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.908770 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2c337e24-9cef-4932-92ae-5a175379c77a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.908806 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2c337e24-9cef-4932-92ae-5a175379c77a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.908830 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2c337e24-9cef-4932-92ae-5a175379c77a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.909143 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.921411 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.932609 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.943758 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.958411 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.973170 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.984815 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.984860 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.984873 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.984893 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.984909 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:05Z","lastTransitionTime":"2025-11-21T13:43:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:05 crc kubenswrapper[5133]: I1121 13:43:05.991735 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33b
f17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:05Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.006473 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.009177 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrzcz\" (UniqueName: \"kubernetes.io/projected/2c337e24-9cef-4932-92ae-5a175379c77a-kube-api-access-wrzcz\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.009207 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2c337e24-9cef-4932-92ae-5a175379c77a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.009249 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2c337e24-9cef-4932-92ae-5a175379c77a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.009275 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2c337e24-9cef-4932-92ae-5a175379c77a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.009902 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2c337e24-9cef-4932-92ae-5a175379c77a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.010286 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2c337e24-9cef-4932-92ae-5a175379c77a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.014161 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2c337e24-9cef-4932-92ae-5a175379c77a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.023050 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.035023 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrzcz\" (UniqueName: \"kubernetes.io/projected/2c337e24-9cef-4932-92ae-5a175379c77a-kube-api-access-wrzcz\") pod \"ovnkube-control-plane-749d76644c-7vfdg\" (UID: \"2c337e24-9cef-4932-92ae-5a175379c77a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.041937 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.056318 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.071556 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.081703 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.087584 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.087730 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.087875 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.088022 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.088140 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:06Z","lastTransitionTime":"2025-11-21T13:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.177330 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.191690 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.191756 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.191775 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.191802 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.191820 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:06Z","lastTransitionTime":"2025-11-21T13:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:06 crc kubenswrapper[5133]: W1121 13:43:06.198829 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c337e24_9cef_4932_92ae_5a175379c77a.slice/crio-02812aa5613e4f30fdc1a2b612a68ad8d677cd0657c562374dd73384a01ef322 WatchSource:0}: Error finding container 02812aa5613e4f30fdc1a2b612a68ad8d677cd0657c562374dd73384a01ef322: Status 404 returned error can't find the container with id 02812aa5613e4f30fdc1a2b612a68ad8d677cd0657c562374dd73384a01ef322 Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.296500 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.296539 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.296551 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.296567 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.296578 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:06Z","lastTransitionTime":"2025-11-21T13:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.399910 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.399959 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.399971 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.400015 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.400032 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:06Z","lastTransitionTime":"2025-11-21T13:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.456797 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:06 crc kubenswrapper[5133]: E1121 13:43:06.456912 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.502392 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.502600 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.502803 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.502963 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.503095 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:06Z","lastTransitionTime":"2025-11-21T13:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.606381 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.606411 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.606419 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.606432 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.606442 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:06Z","lastTransitionTime":"2025-11-21T13:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.709300 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.709330 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.709339 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.709351 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.709360 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:06Z","lastTransitionTime":"2025-11-21T13:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.811522 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.811552 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.811561 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.811574 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.811584 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:06Z","lastTransitionTime":"2025-11-21T13:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.820874 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" event={"ID":"2c337e24-9cef-4932-92ae-5a175379c77a","Type":"ContainerStarted","Data":"dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.820910 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" event={"ID":"2c337e24-9cef-4932-92ae-5a175379c77a","Type":"ContainerStarted","Data":"0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.820920 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" event={"ID":"2c337e24-9cef-4932-92ae-5a175379c77a","Type":"ContainerStarted","Data":"02812aa5613e4f30fdc1a2b612a68ad8d677cd0657c562374dd73384a01ef322"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.840554 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.851040 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.865892 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.880552 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.891197 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.904241 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.914140 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.914193 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.914205 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.914222 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.914545 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:06Z","lastTransitionTime":"2025-11-21T13:43:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.917890 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.929198 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.948919 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33b
f17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.962907 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.976162 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:06 crc kubenswrapper[5133]: I1121 13:43:06.987796 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.000514 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.017150 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.017372 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.017457 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.017523 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.017579 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.039721 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.066385 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 
13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.120031 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.120255 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.120356 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.120451 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.120532 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.222325 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.222377 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.222394 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.222416 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.222432 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.325747 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.325795 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.325809 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.325829 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.325841 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.396905 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-x5wnh"] Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.397352 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:07 crc kubenswrapper[5133]: E1121 13:43:07.397405 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.415711 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.418943 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.418978 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8kp7\" (UniqueName: \"kubernetes.io/projected/b3aabda0-97d9-4886-8909-1c423c4d3238-kube-api-access-p8kp7\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.429597 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.429654 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.429663 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.429683 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.429694 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.429694 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.489621 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:07 crc kubenswrapper[5133]: E1121 13:43:07.490195 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.489660 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:07 crc kubenswrapper[5133]: E1121 13:43:07.490778 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.493913 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287
faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.509751 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.519950 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.520058 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8kp7\" (UniqueName: \"kubernetes.io/projected/b3aabda0-97d9-4886-8909-1c423c4d3238-kube-api-access-p8kp7\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:07 crc kubenswrapper[5133]: E1121 13:43:07.520174 5133 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:07 crc kubenswrapper[5133]: E1121 13:43:07.520271 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs podName:b3aabda0-97d9-4886-8909-1c423c4d3238 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:08.020243888 +0000 UTC m=+47.818076336 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs") pod "network-metrics-daemon-x5wnh" (UID: "b3aabda0-97d9-4886-8909-1c423c4d3238") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.521624 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.532502 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.533542 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.533572 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.533586 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.533600 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.533611 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.536601 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8kp7\" (UniqueName: \"kubernetes.io/projected/b3aabda0-97d9-4886-8909-1c423c4d3238-kube-api-access-p8kp7\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.547449 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.562606 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.574620 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.590280 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.605626 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.620038 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.636251 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.636312 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.636323 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.636343 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.636356 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.645269 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33b
f17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.673393 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.694303 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.706616 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 
13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.738811 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.738847 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.738857 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.738870 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.738879 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.825841 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/0.log" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.829159 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330" exitCode=1 Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.829206 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.829790 5133 scope.go:117] "RemoveContainer" containerID="62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.840665 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.840695 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.840702 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.840715 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.840724 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.847938 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.861715 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.875485 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.889324 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.905767 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.926217 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.941384 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.943400 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.943439 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.943450 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.943467 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.943479 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:07Z","lastTransitionTime":"2025-11-21T13:43:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.954848 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.966899 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.981013 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:07 crc kubenswrapper[5133]: I1121 13:43:07.994018 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:07Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.018387 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"0] Update event received for *factory.egressIPPod openshift-multus/network-metrics-daemon-x5wnh\\\\nI1121 13:43:07.426507 6391 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 13:43:07.426531 6391 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 13:43:07.426587 6391 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 13:43:07.426595 6391 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 13:43:07.426616 6391 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 13:43:07.426641 6391 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 13:43:07.426664 6391 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 13:43:07.426650 6391 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 13:43:07.426689 6391 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1121 13:43:07.426713 6391 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 13:43:07.426719 6391 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 13:43:07.426723 6391 factory.go:656] Stopping watch factory\\\\nI1121 13:43:07.426724 6391 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 13:43:07.426738 
6391 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 13:43:07.426744 6391 ovnkube.go:599] Stopped ovnkube\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6f
afbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.024146 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:08 crc kubenswrapper[5133]: E1121 13:43:08.024343 5133 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:08 crc kubenswrapper[5133]: E1121 13:43:08.024434 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs podName:b3aabda0-97d9-4886-8909-1c423c4d3238 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:09.024393251 +0000 UTC m=+48.822225499 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs") pod "network-metrics-daemon-x5wnh" (UID: "b3aabda0-97d9-4886-8909-1c423c4d3238") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.029772 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.042194 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 
13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.045925 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.045948 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.045965 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.045979 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.045990 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.053283 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.062223 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.148273 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.148329 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.148347 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.148370 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.148386 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.252316 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.252376 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.252388 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.252410 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.252427 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.355447 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.355508 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.355525 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.355550 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.355568 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.456650 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:08 crc kubenswrapper[5133]: E1121 13:43:08.456803 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.457700 5133 scope.go:117] "RemoveContainer" containerID="5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.460422 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.460505 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.460526 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.460553 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.460577 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.588245 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.588467 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.588525 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.588583 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.588639 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.691754 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.691788 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.691796 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.691809 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.691819 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.793921 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.793958 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.793967 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.793979 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.793990 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.834716 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.836701 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.837255 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.839483 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/0.log" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.842764 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.843207 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.850550 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.861874 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.873161 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.885093 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.896288 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.896336 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.896348 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.896365 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.896377 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.900322 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.914353 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.925413 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.943507 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.956738 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.978154 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"0] Update event received for *factory.egressIPPod openshift-multus/network-metrics-daemon-x5wnh\\\\nI1121 13:43:07.426507 6391 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 13:43:07.426531 6391 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 13:43:07.426587 6391 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 13:43:07.426595 6391 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 13:43:07.426616 6391 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 13:43:07.426641 6391 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 13:43:07.426664 6391 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 13:43:07.426650 6391 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 13:43:07.426689 6391 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1121 13:43:07.426713 6391 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 13:43:07.426719 6391 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 13:43:07.426723 6391 factory.go:656] Stopping watch factory\\\\nI1121 13:43:07.426724 6391 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 13:43:07.426738 
6391 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 13:43:07.426744 6391 ovnkube.go:599] Stopped ovnkube\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6f
afbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.995043 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP
\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\
\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f
6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.998797 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.998866 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.998886 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.998911 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:08 crc kubenswrapper[5133]: I1121 13:43:08.998930 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:08Z","lastTransitionTime":"2025-11-21T13:43:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.008930 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.021565 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.032579 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 
13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.036469 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:09 crc kubenswrapper[5133]: E1121 13:43:09.036593 5133 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:09 crc kubenswrapper[5133]: E1121 13:43:09.036655 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs podName:b3aabda0-97d9-4886-8909-1c423c4d3238 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:11.036639394 +0000 UTC m=+50.834471642 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs") pod "network-metrics-daemon-x5wnh" (UID: "b3aabda0-97d9-4886-8909-1c423c4d3238") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.043328 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for 
pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.058207 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.068625 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.078964 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.092378 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.101196 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.101222 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.101230 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.101246 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.101257 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:09Z","lastTransitionTime":"2025-11-21T13:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.103187 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.116156 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.128349 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.140530 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.152097 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.165849 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.181478 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.203629 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.203684 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.203702 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.203729 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.203746 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:09Z","lastTransitionTime":"2025-11-21T13:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.213598 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a6
32f12caed78bfb761f30b726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"0] Update event received for *factory.egressIPPod openshift-multus/network-metrics-daemon-x5wnh\\\\nI1121 13:43:07.426507 6391 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 13:43:07.426531 6391 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 13:43:07.426587 6391 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 13:43:07.426595 6391 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 13:43:07.426616 6391 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 13:43:07.426641 6391 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 13:43:07.426664 6391 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 13:43:07.426650 6391 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 13:43:07.426689 6391 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1121 13:43:07.426713 6391 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 13:43:07.426719 6391 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 13:43:07.426723 6391 factory.go:656] Stopping watch factory\\\\nI1121 13:43:07.426724 6391 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 13:43:07.426738 6391 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 13:43:07.426744 6391 ovnkube.go:599] Stopped 
ovnkube\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.239700 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.261604 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.281756 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.299191 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.308864 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.308926 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.308948 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.308976 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.309046 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:09Z","lastTransitionTime":"2025-11-21T13:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.314371 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.415151 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.415476 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.416390 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.417461 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.417505 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:09Z","lastTransitionTime":"2025-11-21T13:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.456865 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.456944 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:09 crc kubenswrapper[5133]: E1121 13:43:09.457043 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:09 crc kubenswrapper[5133]: E1121 13:43:09.457166 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.457332 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:09 crc kubenswrapper[5133]: E1121 13:43:09.457478 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.521190 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.521240 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.521257 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.521280 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.521298 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:09Z","lastTransitionTime":"2025-11-21T13:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.623322 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.623360 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.623371 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.623386 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.623396 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:09Z","lastTransitionTime":"2025-11-21T13:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.725749 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.725784 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.725793 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.725808 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.725817 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:09Z","lastTransitionTime":"2025-11-21T13:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.828872 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.828915 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.828931 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.828952 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.828967 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:09Z","lastTransitionTime":"2025-11-21T13:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.847838 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/1.log" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.848374 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/0.log" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.850872 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726" exitCode=1 Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.852053 5133 scope.go:117] "RemoveContainer" containerID="19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726" Nov 21 13:43:09 crc kubenswrapper[5133]: E1121 13:43:09.852192 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.852244 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.852275 5133 scope.go:117] "RemoveContainer" containerID="62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.877465 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.890507 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.903934 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.916797 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.930825 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.930862 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.930870 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.930884 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.930892 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:09Z","lastTransitionTime":"2025-11-21T13:43:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.932245 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.947142 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.960072 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.971255 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:09 crc kubenswrapper[5133]: I1121 13:43:09.990092 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://62687577ee38477642562b9cb7ae3be1bf47b33bf17d61b2d5e5f95a4568b330\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"0] Update event received for *factory.egressIPPod openshift-multus/network-metrics-daemon-x5wnh\\\\nI1121 13:43:07.426507 6391 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1121 13:43:07.426531 6391 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1121 13:43:07.426587 6391 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1121 13:43:07.426595 6391 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1121 13:43:07.426616 6391 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1121 13:43:07.426641 6391 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1121 13:43:07.426664 6391 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1121 13:43:07.426650 6391 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1121 13:43:07.426689 6391 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1121 13:43:07.426713 6391 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1121 13:43:07.426719 6391 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1121 13:43:07.426723 6391 factory.go:656] Stopping watch factory\\\\nI1121 13:43:07.426724 6391 handler.go:208] Removed *v1.Node event handler 2\\\\nI1121 13:43:07.426738 6391 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1121 13:43:07.426744 6391 ovnkube.go:599] Stopped 
ovnkube\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:09Z\\\",\\\"message\\\":\\\"ontroller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z]\\\\nI1121 13:43:08.996396 6628 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1121 13:43:08.996441 6628 obj_retry.go:303] Retry 
objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1
d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:09Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.006491 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.023444 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.033836 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.033897 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.033916 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.033940 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.033957 5133 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.035486 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.048568 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.059346 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 
13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.070300 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.079516 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.137095 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.137151 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.137159 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.137174 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.137183 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.240687 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.240742 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.240759 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.240783 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.240802 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.344156 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.344226 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.344249 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.344279 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.344299 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.447832 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.447894 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.447911 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.447934 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.447952 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.457374 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:10 crc kubenswrapper[5133]: E1121 13:43:10.457551 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.550561 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.550620 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.550636 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.550660 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.550677 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.653613 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.653683 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.653706 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.653735 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.653757 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.755881 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.755968 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.756029 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.756063 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.756086 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.860850 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.861925 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.861970 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.862061 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.862137 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.862361 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/1.log" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.866694 5133 scope.go:117] "RemoveContainer" containerID="19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726" Nov 21 13:43:10 crc kubenswrapper[5133]: E1121 13:43:10.866904 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.888370 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.908609 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.931140 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.944989 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.963885 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.965871 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.965918 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.965933 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.965952 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.965968 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:10Z","lastTransitionTime":"2025-11-21T13:43:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.979200 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:10 crc kubenswrapper[5133]: I1121 13:43:10.991785 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:10Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.007503 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:11Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.030330 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:11Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.048767 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:11Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.058856 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:11 crc kubenswrapper[5133]: E1121 13:43:11.058990 5133 secret.go:188] 
Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:11 crc kubenswrapper[5133]: E1121 13:43:11.059088 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs podName:b3aabda0-97d9-4886-8909-1c423c4d3238 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:15.059068654 +0000 UTC m=+54.856900972 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs") pod "network-metrics-daemon-x5wnh" (UID: "b3aabda0-97d9-4886-8909-1c423c4d3238") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.062760 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:11Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.068985 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.069042 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.069056 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.069073 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.069093 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:11Z","lastTransitionTime":"2025-11-21T13:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.075379 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:11Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.092897 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:11Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.104925 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:11Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.126657 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:09Z\\\",\\\"message\\\":\\\"ontroller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z]\\\\nI1121 13:43:08.996396 6628 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e 
UUID: UUIDName:}]\\\\nI1121 13:43:08.996441 6628 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:11Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.144850 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:11Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.172059 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.172124 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:11 crc 
kubenswrapper[5133]: I1121 13:43:11.172136 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.172154 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.172493 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:11Z","lastTransitionTime":"2025-11-21T13:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.276735 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.276793 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.276810 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.276833 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.276850 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:11Z","lastTransitionTime":"2025-11-21T13:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.380597 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.380657 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.380675 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.380698 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.380715 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:11Z","lastTransitionTime":"2025-11-21T13:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.457258 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.457258 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.457451 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:11 crc kubenswrapper[5133]: E1121 13:43:11.457727 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:11 crc kubenswrapper[5133]: E1121 13:43:11.457842 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:11 crc kubenswrapper[5133]: E1121 13:43:11.457682 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.483723 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.483775 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.483794 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.483818 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.483835 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:11Z","lastTransitionTime":"2025-11-21T13:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.586871 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.586968 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.586990 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.587115 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.587141 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:11Z","lastTransitionTime":"2025-11-21T13:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.690542 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.690614 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.690631 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.690655 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.690673 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:11Z","lastTransitionTime":"2025-11-21T13:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.795838 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.795906 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.795923 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.795946 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.795963 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:11Z","lastTransitionTime":"2025-11-21T13:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.899796 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.899846 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.899856 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.899898 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:11 crc kubenswrapper[5133]: I1121 13:43:11.899914 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:11Z","lastTransitionTime":"2025-11-21T13:43:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.001902 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.001962 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.001984 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.002027 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.002043 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.105718 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.105844 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.105863 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.105925 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.105944 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.208923 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.209028 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.209047 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.209072 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.209089 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.311908 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.312037 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.312057 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.312084 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.312102 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.415404 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.415472 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.415494 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.415521 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.415544 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.456957 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:12 crc kubenswrapper[5133]: E1121 13:43:12.457199 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.476559 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d081
6cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.491802 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.502919 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.511817 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.522140 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.522219 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.522244 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.522275 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.522295 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.532632 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.549611 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.580947 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a6
32f12caed78bfb761f30b726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:09Z\\\",\\\"message\\\":\\\"ontroller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z]\\\\nI1121 13:43:08.996396 6628 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1121 13:43:08.996441 6628 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.605537 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.621588 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.625550 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.625575 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.625584 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.625604 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.625616 5133 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.642948 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.682024 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.704015 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.719663 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 
13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.727796 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.727842 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.727857 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.727875 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.727888 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.738990 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.755821 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.767097 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:12Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.830366 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.830408 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.830417 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.830433 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.830443 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.933118 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.933169 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.933186 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.933210 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:12 crc kubenswrapper[5133]: I1121 13:43:12.933226 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:12Z","lastTransitionTime":"2025-11-21T13:43:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.036141 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.036202 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.036227 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.036256 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.036276 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.141791 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.141849 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.141862 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.141880 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.141892 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.244931 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.244971 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.244980 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.244992 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.245020 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.348139 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.348190 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.348206 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.348227 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.348244 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.451194 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.451270 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.451292 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.451321 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.451344 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.456735 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.456868 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.456764 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:13 crc kubenswrapper[5133]: E1121 13:43:13.456960 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:13 crc kubenswrapper[5133]: E1121 13:43:13.457154 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:13 crc kubenswrapper[5133]: E1121 13:43:13.457280 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.553672 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.553702 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.553710 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.553723 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.553732 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.656567 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.656610 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.656621 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.656639 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.656651 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.760047 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.760130 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.760148 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.760175 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.760194 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.863436 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.863527 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.863551 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.863579 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.863596 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.966267 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.966338 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.966361 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.966392 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:13 crc kubenswrapper[5133]: I1121 13:43:13.966415 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:13Z","lastTransitionTime":"2025-11-21T13:43:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.069926 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.070061 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.070083 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.070114 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.070138 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:14Z","lastTransitionTime":"2025-11-21T13:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.174571 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.174645 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.174661 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.174685 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.174701 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:14Z","lastTransitionTime":"2025-11-21T13:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.277442 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.277811 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.277944 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.278160 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.278317 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:14Z","lastTransitionTime":"2025-11-21T13:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.381687 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.381721 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.381730 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.381744 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.381754 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:14Z","lastTransitionTime":"2025-11-21T13:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.457351 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:14 crc kubenswrapper[5133]: E1121 13:43:14.457474 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.484384 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.484430 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.484446 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.484466 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.484481 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:14Z","lastTransitionTime":"2025-11-21T13:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.588702 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.588758 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.588770 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.588790 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.588803 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:14Z","lastTransitionTime":"2025-11-21T13:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.692032 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.692081 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.692093 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.692109 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.692120 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:14Z","lastTransitionTime":"2025-11-21T13:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.795725 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.795829 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.795854 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.795882 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.795904 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:14Z","lastTransitionTime":"2025-11-21T13:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.898911 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.898988 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.899128 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.899166 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:14 crc kubenswrapper[5133]: I1121 13:43:14.899190 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:14Z","lastTransitionTime":"2025-11-21T13:43:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.001578 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.001655 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.001680 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.001709 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.001732 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.104291 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.104340 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.104358 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.104380 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.104399 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.104898 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.105150 5133 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.105256 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs podName:b3aabda0-97d9-4886-8909-1c423c4d3238 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:23.105226781 +0000 UTC m=+62.903059069 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs") pod "network-metrics-daemon-x5wnh" (UID: "b3aabda0-97d9-4886-8909-1c423c4d3238") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.137876 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.137946 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.137967 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.137995 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.138054 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.160322 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:15Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.165846 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.165900 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.165919 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.165951 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.165973 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.185686 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:15Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.191369 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.191440 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.191458 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.191490 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.191509 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.211987 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:15Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.219314 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.219390 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.219422 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.219456 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.219476 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.238783 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:15Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.243817 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.243884 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.243902 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.243928 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.243948 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.264089 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:15Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.264351 5133 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.268380 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.268442 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.268457 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.268477 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.268492 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.371827 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.371876 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.371894 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.371918 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.371938 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.457340 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.457461 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.457340 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.457531 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.457650 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:15 crc kubenswrapper[5133]: E1121 13:43:15.457782 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.475100 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.475188 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.475223 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.475250 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.475268 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.578436 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.578472 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.578483 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.578501 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.578512 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.683783 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.683894 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.683918 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.683947 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.683970 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.790984 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.791098 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.791127 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.791374 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.791405 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.895525 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.895611 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.895638 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.895665 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.895688 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.998362 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.998418 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.998615 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.998639 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:15 crc kubenswrapper[5133]: I1121 13:43:15.998656 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:15Z","lastTransitionTime":"2025-11-21T13:43:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.101801 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.101870 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.101888 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.101911 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.101929 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:16Z","lastTransitionTime":"2025-11-21T13:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.205672 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.205738 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.205756 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.205782 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.205807 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:16Z","lastTransitionTime":"2025-11-21T13:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.308520 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.308570 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.308581 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.308598 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.308610 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:16Z","lastTransitionTime":"2025-11-21T13:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.411036 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.411077 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.411087 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.411102 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.411113 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:16Z","lastTransitionTime":"2025-11-21T13:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.456650 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:16 crc kubenswrapper[5133]: E1121 13:43:16.456786 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.513829 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.513877 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.513893 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.513910 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.513921 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:16Z","lastTransitionTime":"2025-11-21T13:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.617144 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.617189 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.617200 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.617216 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.617227 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:16Z","lastTransitionTime":"2025-11-21T13:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.719954 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.720014 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.720027 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.720043 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.720056 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:16Z","lastTransitionTime":"2025-11-21T13:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.822839 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.822904 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.822921 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.822947 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.822965 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:16Z","lastTransitionTime":"2025-11-21T13:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.925765 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.925826 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.925847 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.925870 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:16 crc kubenswrapper[5133]: I1121 13:43:16.925887 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:16Z","lastTransitionTime":"2025-11-21T13:43:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.029208 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.029260 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.029277 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.029299 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.029316 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.132868 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.132928 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.132950 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.132976 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.133036 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.235465 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.235545 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.235569 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.235595 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.235617 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.338242 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.338314 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.338337 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.338367 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.338392 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.441951 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.442029 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.442043 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.442061 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.442074 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.457317 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.457333 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:17 crc kubenswrapper[5133]: E1121 13:43:17.457508 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.457345 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:17 crc kubenswrapper[5133]: E1121 13:43:17.457647 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:17 crc kubenswrapper[5133]: E1121 13:43:17.457772 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.544639 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.544712 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.544736 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.544765 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.544788 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.647613 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.647681 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.647704 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.647734 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.647757 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.753720 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.754481 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.754517 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.754545 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.754567 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.858270 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.858347 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.858362 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.858403 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.858419 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.961847 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.961924 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.961945 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.961967 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:17 crc kubenswrapper[5133]: I1121 13:43:17.961986 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:17Z","lastTransitionTime":"2025-11-21T13:43:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.064672 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.064707 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.064715 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.064729 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.064738 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:18Z","lastTransitionTime":"2025-11-21T13:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.167675 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.167739 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.167757 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.167781 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.167799 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:18Z","lastTransitionTime":"2025-11-21T13:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.270201 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.270242 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.270254 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.270269 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.270281 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:18Z","lastTransitionTime":"2025-11-21T13:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.372841 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.372901 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.372916 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.372942 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.372962 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:18Z","lastTransitionTime":"2025-11-21T13:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.456814 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:18 crc kubenswrapper[5133]: E1121 13:43:18.456992 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.497060 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.497109 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.497125 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.497148 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.497166 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:18Z","lastTransitionTime":"2025-11-21T13:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.599913 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.599957 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.599969 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.599985 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.599995 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:18Z","lastTransitionTime":"2025-11-21T13:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.702511 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.702581 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.702600 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.702627 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.702649 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:18Z","lastTransitionTime":"2025-11-21T13:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.805733 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.805796 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.805813 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.805835 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.805852 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:18Z","lastTransitionTime":"2025-11-21T13:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.909453 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.909520 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.909545 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.909573 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:18 crc kubenswrapper[5133]: I1121 13:43:18.909595 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:18Z","lastTransitionTime":"2025-11-21T13:43:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.012507 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.012547 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.012556 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.012572 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.012583 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.115390 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.115452 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.115471 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.115496 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.115515 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.218419 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.218494 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.218513 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.218539 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.218563 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.321834 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.322204 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.322348 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.322482 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.322619 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.336761 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.352578 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.360623 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92e
daf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.383728 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.403190 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.421428 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.425606 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.425906 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.426182 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.426421 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.426637 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.444771 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.457478 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.457569 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:19 crc kubenswrapper[5133]: E1121 13:43:19.457674 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.457953 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:19 crc kubenswrapper[5133]: E1121 13:43:19.458271 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:19 crc kubenswrapper[5133]: E1121 13:43:19.458548 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.466397 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook
\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.486293 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.502710 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.521167 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.532803 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.532857 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.532875 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.532898 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.532927 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.534882 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.555806 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a6
32f12caed78bfb761f30b726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:09Z\\\",\\\"message\\\":\\\"ontroller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z]\\\\nI1121 13:43:08.996396 6628 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1121 13:43:08.996441 6628 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.569769 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.585709 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.603352 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.622481 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.637076 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.637123 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.637140 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.637163 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.637177 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.639497 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:19Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.740034 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.740098 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.740116 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.740142 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.740163 5133 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.843078 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.843136 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.843154 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.843178 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.843196 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.945842 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.945913 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.945930 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.945955 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:19 crc kubenswrapper[5133]: I1121 13:43:19.945976 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:19Z","lastTransitionTime":"2025-11-21T13:43:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.050271 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.050382 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.050413 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.050446 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.050470 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.059976 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.060208 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:43:52.060168606 +0000 UTC m=+91.858000904 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.153624 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.153677 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.153689 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.153715 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.153728 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.161830 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.161885 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.161912 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.161954 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162131 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162139 5133 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162162 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162193 5133 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162220 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:52.162198205 +0000 UTC m=+91.960030463 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162259 5133 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162130 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162418 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162269 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:52.162246417 +0000 UTC m=+91.960078685 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162443 5133 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162465 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:52.162450582 +0000 UTC m=+91.960282850 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.162517 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:52.162488993 +0000 UTC m=+91.960321281 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.256504 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.256559 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.256573 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.256589 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.256603 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.360267 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.360356 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.360383 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.360412 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.360430 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.456684 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:20 crc kubenswrapper[5133]: E1121 13:43:20.456900 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.464186 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.464529 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.464710 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.464876 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.465079 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.568111 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.568166 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.568189 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.568219 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.568236 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.670839 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.671225 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.671377 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.671577 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.671792 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.775994 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.776518 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.776728 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.776877 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.776971 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.880377 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.880994 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.881269 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.881442 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.881571 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.956199 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.983044 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:20Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.988474 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.988562 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.988588 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.988621 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:20 crc kubenswrapper[5133]: I1121 13:43:20.988646 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:20Z","lastTransitionTime":"2025-11-21T13:43:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.001103 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:20Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.017893 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.034395 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.051448 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.069907 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.090622 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.092804 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.092851 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.092863 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.092906 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.092922 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:21Z","lastTransitionTime":"2025-11-21T13:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.101921 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.117058 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.131370 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.151147 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.182957 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:09Z\\\",\\\"message\\\":\\\"ontroller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z]\\\\nI1121 13:43:08.996396 6628 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e 
UUID: UUIDName:}]\\\\nI1121 13:43:08.996441 6628 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.195364 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.195433 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.195443 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.195494 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.195508 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:21Z","lastTransitionTime":"2025-11-21T13:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.205630 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.221787 5133 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.239934 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.258103 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.276799 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:21Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.297943 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.297981 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.297991 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.298017 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.298028 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:21Z","lastTransitionTime":"2025-11-21T13:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.401494 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.401569 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.401593 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.401622 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.401644 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:21Z","lastTransitionTime":"2025-11-21T13:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.457270 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:21 crc kubenswrapper[5133]: E1121 13:43:21.457628 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.457380 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:21 crc kubenswrapper[5133]: E1121 13:43:21.457822 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.457276 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:21 crc kubenswrapper[5133]: E1121 13:43:21.458063 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.503872 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.503936 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.503956 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.503981 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.504040 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:21Z","lastTransitionTime":"2025-11-21T13:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.607249 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.607328 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.607347 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.607374 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.607392 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:21Z","lastTransitionTime":"2025-11-21T13:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.710469 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.710539 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.710558 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.710586 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.710605 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:21Z","lastTransitionTime":"2025-11-21T13:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.813990 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.814079 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.814096 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.814120 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.814138 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:21Z","lastTransitionTime":"2025-11-21T13:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.916616 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.916650 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.916659 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.916672 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:21 crc kubenswrapper[5133]: I1121 13:43:21.916682 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:21Z","lastTransitionTime":"2025-11-21T13:43:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.019622 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.019659 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.019688 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.019702 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.019711 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.122329 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.122376 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.122386 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.122403 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.122416 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.224956 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.225222 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.225319 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.225401 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.225461 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.328802 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.329272 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.329295 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.329325 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.329354 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.432400 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.432462 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.432482 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.432503 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.432517 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.457037 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:22 crc kubenswrapper[5133]: E1121 13:43:22.457412 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.458200 5133 scope.go:117] "RemoveContainer" containerID="19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.474859 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.495781 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.515449 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.535776 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.535838 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.535854 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.536214 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.536259 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.541226 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a6
32f12caed78bfb761f30b726\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:09Z\\\",\\\"message\\\":\\\"ontroller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z]\\\\nI1121 13:43:08.996396 6628 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1121 13:43:08.996441 6628 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.564464 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.579695 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay
.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.596771 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.610830 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.626125 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.638688 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.638749 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.638766 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.638787 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.638803 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.644984 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.659800 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.676595 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.688701 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.700497 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.713703 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.732840 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.741169 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.741253 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.741265 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.741283 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.741296 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.746463 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\
\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.844370 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.844433 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.844450 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.844475 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.844489 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.913864 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/1.log" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.928541 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.929408 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.948495 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.952369 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.952411 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.952428 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.952451 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.952470 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:22Z","lastTransitionTime":"2025-11-21T13:43:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.969287 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:22 crc kubenswrapper[5133]: I1121 13:43:22.987886 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:22Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.007912 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.021705 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.037605 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.055457 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.055526 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.055550 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.055576 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.055595 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.069684 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f
32b14f37d6252bac480dbc38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:09Z\\\",\\\"message\\\":\\\"ontroller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z]\\\\nI1121 13:43:08.996396 6628 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1121 13:43:08.996441 6628 obj_retry.go:303] Retry 
objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"
containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.132501 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:23 crc kubenswrapper[5133]: E1121 13:43:23.132662 5133 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:23 crc kubenswrapper[5133]: E1121 13:43:23.132723 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs podName:b3aabda0-97d9-4886-8909-1c423c4d3238 nodeName:}" failed. No retries permitted until 2025-11-21 13:43:39.132704735 +0000 UTC m=+78.930536983 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs") pod "network-metrics-daemon-x5wnh" (UID: "b3aabda0-97d9-4886-8909-1c423c4d3238") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.135258 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/
var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.152667 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.158512 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.158708 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.158789 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.158880 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.158956 5133 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.173680 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.189291 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.202656 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.214882 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.226771 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.245204 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-control
ler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.257672 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.267364 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.267411 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.267421 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.267440 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.267452 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.279374 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.371919 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.371967 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.371981 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.372080 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.372097 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.457423 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.457484 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:23 crc kubenswrapper[5133]: E1121 13:43:23.457564 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:23 crc kubenswrapper[5133]: E1121 13:43:23.457693 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.458091 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:23 crc kubenswrapper[5133]: E1121 13:43:23.458427 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.474596 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.474625 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.474634 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.474645 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.474670 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.576289 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.576325 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.576334 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.576347 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.576359 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.678297 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.678599 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.678609 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.678624 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.678636 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.782508 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.782881 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.783122 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.783326 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.783497 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.886066 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.886133 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.886145 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.886167 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.886182 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.939686 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/2.log" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.941032 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/1.log" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.946561 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38" exitCode=1 Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.946651 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.947015 5133 scope.go:117] "RemoveContainer" containerID="19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.948048 5133 scope.go:117] "RemoveContainer" containerID="e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38" Nov 21 13:43:23 crc kubenswrapper[5133]: E1121 13:43:23.948436 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.977374 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.990347 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.990484 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.990542 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.990611 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.990667 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:23Z","lastTransitionTime":"2025-11-21T13:43:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:23 crc kubenswrapper[5133]: I1121 13:43:23.992464 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:23Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.014318 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.031830 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.050065 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.074370 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.093265 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.095094 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.095141 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.095151 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.095170 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.095181 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:24Z","lastTransitionTime":"2025-11-21T13:43:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.120755 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f
32b14f37d6252bac480dbc38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://19c3c094e3683ffe1c79217cc7824910561aa6a632f12caed78bfb761f30b726\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:09Z\\\",\\\"message\\\":\\\"ontroller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:08Z is after 2025-08-24T17:21:41Z]\\\\nI1121 13:43:08.996396 6628 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:openshift-machine-config-operator/machine-config-daemon]} name:Service_openshift-machine-config-operator/machine-config-daemon_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.43:8798: 10.217.4.43:9001:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {a36f6289-d09f-43f8-8a8a-c9d2cc11eb0d}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1121 13:43:08.996441 6628 obj_retry.go:303] Retry objec\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:07Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:23Z\\\",\\\"message\\\":\\\"Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1121 13:43:23.558190 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-b9v8b in node crc\\\\nI1121 13:43:23.558832 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-b9v8b after 0 failed attempt(s)\\\\nI1121 13:43:23.558866 6798 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-b9v8b\\\\nI1121 13:43:23.558216 6798 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558892 6798 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558904 6798 ovn.go:134] 
Ensuring zone local for Pod openshift-multus/multus-m5d24 in node crc\\\\nI1121 13:43:23.558913 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-m5d24 after 0 failed attempt(s)\\\\nI1121 13:43:23.558921 6798 default_network_controller.go:776] Recordin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.147176 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.169553 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.187950 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.198958 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.199048 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.199065 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.199089 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.199110 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:24Z","lastTransitionTime":"2025-11-21T13:43:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.211080 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.231247 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.246556 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.263589 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.285264 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.303546 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.303605 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.303625 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.303648 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.303667 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:24Z","lastTransitionTime":"2025-11-21T13:43:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.305275 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.406354 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.406417 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.406436 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.406461 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.406479 5133 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:24Z","lastTransitionTime":"2025-11-21T13:43:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.457704 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:24 crc kubenswrapper[5133]: E1121 13:43:24.458134 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.510208 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.510283 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.510304 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.510334 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.510356 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:24Z","lastTransitionTime":"2025-11-21T13:43:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.613714 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.613793 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.613815 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.613846 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.613869 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:24Z","lastTransitionTime":"2025-11-21T13:43:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.718081 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.718147 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.718180 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.718202 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.718246 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:24Z","lastTransitionTime":"2025-11-21T13:43:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.822421 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.822512 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.822538 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.822575 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.822602 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:24Z","lastTransitionTime":"2025-11-21T13:43:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.926692 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.926778 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.926796 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.926827 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.926845 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:24Z","lastTransitionTime":"2025-11-21T13:43:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.954193 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/2.log" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.959763 5133 scope.go:117] "RemoveContainer" containerID="e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38" Nov 21 13:43:24 crc kubenswrapper[5133]: E1121 13:43:24.960128 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:43:24 crc kubenswrapper[5133]: I1121 13:43:24.985716 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:24Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.004085 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.028106 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.030258 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.030311 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.030328 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.030352 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.030370 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.050158 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.079880 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f
32b14f37d6252bac480dbc38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:23Z\\\",\\\"message\\\":\\\"Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1121 13:43:23.558190 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-b9v8b in node crc\\\\nI1121 13:43:23.558832 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-b9v8b after 0 failed attempt(s)\\\\nI1121 13:43:23.558866 6798 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-b9v8b\\\\nI1121 13:43:23.558216 6798 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558892 6798 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558904 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-m5d24 in node crc\\\\nI1121 13:43:23.558913 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-m5d24 after 0 failed attempt(s)\\\\nI1121 13:43:23.558921 6798 default_network_controller.go:776] Recordin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.103559 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.126545 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay
.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.133248 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.133302 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.133504 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.133532 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.133554 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.149134 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"nam
e\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.175628 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.193179 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.209510 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.230547 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.236923 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.236978 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.237028 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.237062 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.237085 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.245935 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.259341 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.276788 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.293737 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.315174 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.340445 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.340487 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.340497 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.340516 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.340527 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.388692 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.388751 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.388763 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.388781 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.388795 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: E1121 13:43:25.408186 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.419220 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.419268 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.419280 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.419299 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.419311 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: E1121 13:43:25.435990 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.441475 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.441533 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.441548 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.441569 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.441585 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.457381 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.457464 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.457383 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:25 crc kubenswrapper[5133]: E1121 13:43:25.457483 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:25 crc kubenswrapper[5133]: E1121 13:43:25.457550 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:25 crc kubenswrapper[5133]: E1121 13:43:25.457647 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:25 crc kubenswrapper[5133]: E1121 13:43:25.461476 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.466457 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.466494 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.466507 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.466540 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.466554 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: E1121 13:43:25.481626 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.485078 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.485109 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.485119 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.485137 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.485151 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: E1121 13:43:25.504393 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:25Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:25 crc kubenswrapper[5133]: E1121 13:43:25.504614 5133 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.506611 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.506650 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.506666 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.506688 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.506706 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.610236 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.610295 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.610307 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.610328 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.610341 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.714293 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.714342 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.714351 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.714370 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.714380 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.817626 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.817688 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.817700 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.817721 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.817734 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.920877 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.920939 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.920959 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.920983 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:25 crc kubenswrapper[5133]: I1121 13:43:25.921029 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:25Z","lastTransitionTime":"2025-11-21T13:43:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.024669 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.024734 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.024754 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.024779 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.024799 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.128206 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.128299 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.128329 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.128361 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.128382 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.232142 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.232195 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.232208 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.232229 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.232253 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.336520 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.336589 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.336603 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.336630 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.336646 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.440205 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.440289 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.440303 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.440328 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.440345 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.457709 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:26 crc kubenswrapper[5133]: E1121 13:43:26.457927 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.544222 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.544292 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.544310 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.544336 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.544356 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.647322 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.647382 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.647398 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.647425 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.647442 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.750375 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.750483 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.750506 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.750534 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.750560 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.853650 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.853709 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.853731 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.853759 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.853776 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.957721 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.957831 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.957844 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.957872 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:26 crc kubenswrapper[5133]: I1121 13:43:26.957887 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:26Z","lastTransitionTime":"2025-11-21T13:43:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.062243 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.062315 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.062332 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.062359 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.062378 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.165959 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.166156 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.166192 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.166221 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.166244 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.270040 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.270093 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.270105 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.270124 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.270140 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.372613 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.372673 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.372695 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.372726 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.372745 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.457638 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:27 crc kubenswrapper[5133]: E1121 13:43:27.457850 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.458442 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:27 crc kubenswrapper[5133]: E1121 13:43:27.458511 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.458563 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:27 crc kubenswrapper[5133]: E1121 13:43:27.458620 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.475570 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.475618 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.475630 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.475647 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.475661 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.579892 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.579963 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.579981 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.580035 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.580054 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.682853 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.682898 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.682909 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.682924 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.682935 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.786149 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.786207 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.786228 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.786256 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.786276 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.889052 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.889094 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.889105 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.889121 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.889134 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.991992 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.992083 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.992103 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.992128 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:27 crc kubenswrapper[5133]: I1121 13:43:27.992145 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:27Z","lastTransitionTime":"2025-11-21T13:43:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.094067 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.094140 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.094162 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.094189 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.094214 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:28Z","lastTransitionTime":"2025-11-21T13:43:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.197159 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.197218 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.197235 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.197259 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.197275 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:28Z","lastTransitionTime":"2025-11-21T13:43:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.299522 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.299595 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.299644 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.299676 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.299700 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:28Z","lastTransitionTime":"2025-11-21T13:43:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.401788 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.401833 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.401843 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.401857 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.401867 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:28Z","lastTransitionTime":"2025-11-21T13:43:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.456988 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:28 crc kubenswrapper[5133]: E1121 13:43:28.457143 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.504180 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.504246 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.504256 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.504269 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.504277 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:28Z","lastTransitionTime":"2025-11-21T13:43:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.606659 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.606747 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.606770 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.606802 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.606825 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:28Z","lastTransitionTime":"2025-11-21T13:43:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.709152 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.709180 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.709190 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.709204 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.709215 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:28Z","lastTransitionTime":"2025-11-21T13:43:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.811207 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.811237 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.811244 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.811256 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.811265 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:28Z","lastTransitionTime":"2025-11-21T13:43:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.913192 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.913258 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.913275 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.913301 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:28 crc kubenswrapper[5133]: I1121 13:43:28.913327 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:28Z","lastTransitionTime":"2025-11-21T13:43:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.017920 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.018062 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.018089 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.018117 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.018136 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.120491 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.120558 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.120573 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.120595 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.120617 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.224316 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.224468 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.224491 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.224524 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.224546 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.327713 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.327754 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.327765 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.327782 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.327793 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.430036 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.430075 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.430083 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.430120 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.430129 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.456633 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:29 crc kubenswrapper[5133]: E1121 13:43:29.456786 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.457036 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.457111 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:29 crc kubenswrapper[5133]: E1121 13:43:29.457154 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:29 crc kubenswrapper[5133]: E1121 13:43:29.457267 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.533171 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.533201 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.533209 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.533223 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.533232 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.635470 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.635581 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.635605 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.635636 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.635660 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.738578 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.738687 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.738712 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.738741 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.738764 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.841479 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.841533 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.841547 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.841567 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.841582 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.945175 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.945235 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.945253 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.945276 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:29 crc kubenswrapper[5133]: I1121 13:43:29.945294 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:29Z","lastTransitionTime":"2025-11-21T13:43:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.048086 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.048135 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.048148 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.048164 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.048177 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.150714 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.150778 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.150789 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.150804 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.150815 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.253327 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.253399 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.253411 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.253429 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.253441 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.356544 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.356607 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.356626 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.356655 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.356678 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.456902 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:30 crc kubenswrapper[5133]: E1121 13:43:30.457023 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.458522 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.458550 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.458559 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.458575 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.458584 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.560611 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.560662 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.560676 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.560693 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.560705 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.663864 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.663928 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.663948 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.663973 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.663991 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.767247 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.767320 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.767339 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.767366 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.767387 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.870150 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.870228 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.870254 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.870289 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.870314 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.973201 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.973247 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.973257 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.973274 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:30 crc kubenswrapper[5133]: I1121 13:43:30.973284 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:30Z","lastTransitionTime":"2025-11-21T13:43:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.075926 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.076029 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.076055 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.076084 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.076107 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:31Z","lastTransitionTime":"2025-11-21T13:43:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.178253 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.178280 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.178293 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.178310 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.178321 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:31Z","lastTransitionTime":"2025-11-21T13:43:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.281010 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.281049 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.281059 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.281073 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.281085 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:31Z","lastTransitionTime":"2025-11-21T13:43:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.383970 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.384205 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.384227 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.384296 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.384328 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:31Z","lastTransitionTime":"2025-11-21T13:43:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.456942 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.457043 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.456951 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:31 crc kubenswrapper[5133]: E1121 13:43:31.457189 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:31 crc kubenswrapper[5133]: E1121 13:43:31.457442 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:31 crc kubenswrapper[5133]: E1121 13:43:31.457338 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.486081 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.486120 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.486131 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.486145 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.486157 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:31Z","lastTransitionTime":"2025-11-21T13:43:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.589740 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.589782 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.589792 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.589805 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.589814 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:31Z","lastTransitionTime":"2025-11-21T13:43:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.692948 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.692991 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.693022 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.693036 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.693045 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:31Z","lastTransitionTime":"2025-11-21T13:43:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.795633 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.795699 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.795721 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.795754 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.795775 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:31Z","lastTransitionTime":"2025-11-21T13:43:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.898143 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.898195 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.898215 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.898231 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:31 crc kubenswrapper[5133]: I1121 13:43:31.898242 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:31Z","lastTransitionTime":"2025-11-21T13:43:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.004161 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.004430 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.004621 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.004758 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.004889 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.109244 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.109302 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.109313 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.109332 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.109346 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.212109 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.212205 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.212226 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.212261 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.212285 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.314929 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.314981 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.314994 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.315028 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.315041 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.417309 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.417347 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.417356 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.417369 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.417378 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.457118 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:32 crc kubenswrapper[5133]: E1121 13:43:32.457355 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.476838 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.493559 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.507464 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.517570 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.519050 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.519099 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.519112 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.519129 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 
21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.519142 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.530517 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.542571 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.553052 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.571888 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:23Z\\\",\\\"message\\\":\\\"Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1121 13:43:23.558190 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-b9v8b in node crc\\\\nI1121 13:43:23.558832 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-b9v8b after 0 failed attempt(s)\\\\nI1121 13:43:23.558866 6798 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-b9v8b\\\\nI1121 13:43:23.558216 6798 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558892 6798 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558904 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-m5d24 in node crc\\\\nI1121 13:43:23.558913 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-m5d24 after 0 failed attempt(s)\\\\nI1121 13:43:23.558921 6798 
default_network_controller.go:776] Recordin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.586662 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.601907 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.618848 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.621494 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.621591 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.621617 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.621651 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.621675 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.630217 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.644767 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.657852 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.673384 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.690711 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with 
unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.705964 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:32Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.725104 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.725151 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.725165 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.725187 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.725204 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.828358 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.828414 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.828425 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.828443 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.828454 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.931020 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.931060 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.931069 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.931084 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:32 crc kubenswrapper[5133]: I1121 13:43:32.931095 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:32Z","lastTransitionTime":"2025-11-21T13:43:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.033814 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.033857 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.033868 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.033886 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.033900 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.136122 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.136170 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.136180 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.136195 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.136205 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.239074 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.239134 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.239153 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.239178 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.239197 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.342954 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.343038 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.343054 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.343076 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.343141 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.446795 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.446867 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.446884 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.446908 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.446928 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.457585 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.457625 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.457730 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:33 crc kubenswrapper[5133]: E1121 13:43:33.457796 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:33 crc kubenswrapper[5133]: E1121 13:43:33.457979 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:33 crc kubenswrapper[5133]: E1121 13:43:33.458223 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.548969 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.549034 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.549063 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.549081 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.549093 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.652971 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.653074 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.653088 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.653111 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.653127 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.756527 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.756867 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.756875 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.756890 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.756900 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.859075 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.859115 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.859126 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.859140 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.859151 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.961702 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.961748 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.961757 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.961772 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:33 crc kubenswrapper[5133]: I1121 13:43:33.961782 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:33Z","lastTransitionTime":"2025-11-21T13:43:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.063899 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.063948 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.063956 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.063970 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.063981 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.167649 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.167703 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.167720 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.167746 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.167764 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.270956 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.271094 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.271130 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.271164 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.271190 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.373930 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.373981 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.374017 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.374038 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.374052 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.457034 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 21 13:43:34 crc kubenswrapper[5133]: E1121 13:43:34.457198 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.478126 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.478176 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.478187 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.478203 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.478215 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.581668 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.581709 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.581721 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.581736 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.581747 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.684898 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.684961 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.684973 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.684993 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.685022 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.787287 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.787349 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.787368 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.787397 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.787420 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.890514 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.890573 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.890583 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.890603 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.890613 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.994393 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.994458 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.994481 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.994510 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:34 crc kubenswrapper[5133]: I1121 13:43:34.994531 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:34Z","lastTransitionTime":"2025-11-21T13:43:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.097446 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.097516 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.097537 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.097561 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.097583 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.200052 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.200104 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.200116 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.200135 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.200148 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.302673 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.302720 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.302739 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.302763 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.302781 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.410937 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.411019 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.411186 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.411208 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.411223 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.456501 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.456538 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 13:43:35 crc kubenswrapper[5133]: E1121 13:43:35.456628 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.456702 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 13:43:35 crc kubenswrapper[5133]: E1121 13:43:35.456845 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 13:43:35 crc kubenswrapper[5133]: E1121 13:43:35.457079 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.513789 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.513835 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.513853 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.513877 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.513894 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.616349 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.616377 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.616385 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.616397 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.616406 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.702387 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.702428 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.702437 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.702451 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.702460 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:35 crc kubenswrapper[5133]: E1121 13:43:35.716514 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:35Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.720597 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.720665 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.720685 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.720711 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.720730 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:35 crc kubenswrapper[5133]: E1121 13:43:35.742312 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:35Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.746523 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.746597 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.746614 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.746635 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.746650 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:35 crc kubenswrapper[5133]: E1121 13:43:35.764224 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:35Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.768382 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.768428 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.768442 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.768461 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.768475 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:35 crc kubenswrapper[5133]: E1121 13:43:35.782474 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:35Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.786598 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.786634 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.786644 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.786659 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.786670 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:35 crc kubenswrapper[5133]: E1121 13:43:35.800507 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:35Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:35 crc kubenswrapper[5133]: E1121 13:43:35.800686 5133 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.802356 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.802397 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.802412 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.802432 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.802443 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.905161 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.905210 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.905222 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.905241 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:35 crc kubenswrapper[5133]: I1121 13:43:35.905253 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:35Z","lastTransitionTime":"2025-11-21T13:43:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.008468 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.008521 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.008538 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.008560 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.008575 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.111134 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.111199 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.111211 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.111232 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.111246 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.213925 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.213970 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.213982 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.214024 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.214039 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.316414 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.316487 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.316509 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.316539 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.316560 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.419800 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.419840 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.419848 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.419863 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.419873 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.457390 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:36 crc kubenswrapper[5133]: E1121 13:43:36.457994 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.458472 5133 scope.go:117] "RemoveContainer" containerID="e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38" Nov 21 13:43:36 crc kubenswrapper[5133]: E1121 13:43:36.458846 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.522237 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.522277 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.522285 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.522301 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.522311 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.625038 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.625100 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.625112 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.625132 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.625146 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.728517 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.728559 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.728568 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.728583 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.728592 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.831189 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.831237 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.831246 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.831263 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.831273 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.933558 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.933628 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.933639 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.933655 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:36 crc kubenswrapper[5133]: I1121 13:43:36.933666 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:36Z","lastTransitionTime":"2025-11-21T13:43:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.036312 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.036411 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.036427 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.036452 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.036472 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.138468 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.138505 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.138516 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.138531 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.138545 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.240486 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.240544 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.240553 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.240568 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.240580 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.343621 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.343663 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.343675 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.343690 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.343702 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.447094 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.447155 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.447164 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.447177 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.447186 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.457530 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.457620 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:37 crc kubenswrapper[5133]: E1121 13:43:37.457705 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.457797 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:37 crc kubenswrapper[5133]: E1121 13:43:37.457874 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:37 crc kubenswrapper[5133]: E1121 13:43:37.458181 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.472475 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.549583 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.549616 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.549626 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.549640 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.549648 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.652065 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.652111 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.652126 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.652141 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.652155 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.755541 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.755578 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.755590 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.755606 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.755618 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.858510 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.858560 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.858571 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.858587 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.858600 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.961924 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.961980 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.961991 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.962027 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:37 crc kubenswrapper[5133]: I1121 13:43:37.962039 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:37Z","lastTransitionTime":"2025-11-21T13:43:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.065517 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.065562 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.065570 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.065582 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.065592 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.169220 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.169284 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.169297 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.169324 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.169343 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.271858 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.271924 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.271946 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.271965 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.271977 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.373701 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.373744 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.373755 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.373770 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.373780 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.457625 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:38 crc kubenswrapper[5133]: E1121 13:43:38.457801 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.475939 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.475988 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.476015 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.476033 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.476045 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.579598 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.579664 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.579687 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.579715 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.579736 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.682388 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.682451 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.682468 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.682489 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.682503 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.785844 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.785905 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.785924 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.785948 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.785981 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.889126 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.889219 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.889257 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.889289 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.889311 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.992165 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.992242 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.992265 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.992295 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:38 crc kubenswrapper[5133]: I1121 13:43:38.992323 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:38Z","lastTransitionTime":"2025-11-21T13:43:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.094508 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.094549 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.094561 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.094578 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.094594 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:39Z","lastTransitionTime":"2025-11-21T13:43:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.197302 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.197376 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.197394 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.197420 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.197464 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:39Z","lastTransitionTime":"2025-11-21T13:43:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.218693 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:39 crc kubenswrapper[5133]: E1121 13:43:39.218947 5133 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:39 crc kubenswrapper[5133]: E1121 13:43:39.219071 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs podName:b3aabda0-97d9-4886-8909-1c423c4d3238 nodeName:}" failed. No retries permitted until 2025-11-21 13:44:11.219042576 +0000 UTC m=+111.016874854 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs") pod "network-metrics-daemon-x5wnh" (UID: "b3aabda0-97d9-4886-8909-1c423c4d3238") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.300641 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.300712 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.300730 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.300754 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.300774 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:39Z","lastTransitionTime":"2025-11-21T13:43:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.403070 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.403122 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.403138 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.403159 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.403174 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:39Z","lastTransitionTime":"2025-11-21T13:43:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.457549 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.457630 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:39 crc kubenswrapper[5133]: E1121 13:43:39.457707 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.457787 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:39 crc kubenswrapper[5133]: E1121 13:43:39.457960 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:39 crc kubenswrapper[5133]: E1121 13:43:39.458121 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.506378 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.506420 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.506431 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.506447 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.506459 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:39Z","lastTransitionTime":"2025-11-21T13:43:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.608591 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.608662 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.608678 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.608713 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.608733 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:39Z","lastTransitionTime":"2025-11-21T13:43:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.711458 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.711535 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.711553 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.711580 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.711599 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:39Z","lastTransitionTime":"2025-11-21T13:43:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.814456 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.814516 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.814531 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.814546 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.814555 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:39Z","lastTransitionTime":"2025-11-21T13:43:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.917820 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.917892 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.917910 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.917934 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:39 crc kubenswrapper[5133]: I1121 13:43:39.917950 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:39Z","lastTransitionTime":"2025-11-21T13:43:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.021288 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.021345 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.021356 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.021391 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.021403 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.124478 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.124540 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.124550 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.124585 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.124600 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.228449 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.228485 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.228495 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.228542 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.228555 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.332268 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.332338 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.332355 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.332381 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.332398 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.434175 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.434240 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.434258 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.434283 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.434301 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.457461 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:40 crc kubenswrapper[5133]: E1121 13:43:40.457627 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.536900 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.536931 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.536974 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.536987 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.537012 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.639094 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.639155 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.639172 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.639199 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.639216 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.742992 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.743114 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.743136 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.743165 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.743184 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.846771 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.846834 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.846853 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.846879 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.846898 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.948850 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.948943 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.948962 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.948992 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:40 crc kubenswrapper[5133]: I1121 13:43:40.949049 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:40Z","lastTransitionTime":"2025-11-21T13:43:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.022078 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m5d24_0077329a-abad-4c6d-a601-2dc01fd83184/kube-multus/0.log" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.022161 5133 generic.go:334] "Generic (PLEG): container finished" podID="0077329a-abad-4c6d-a601-2dc01fd83184" containerID="24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661" exitCode=1 Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.022218 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m5d24" event={"ID":"0077329a-abad-4c6d-a601-2dc01fd83184","Type":"ContainerDied","Data":"24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.022981 5133 scope.go:117] "RemoveContainer" containerID="24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.046468 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.052973 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.053056 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.053075 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.053100 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.053118 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.070765 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.088050 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.105517 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.129104 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f
32b14f37d6252bac480dbc38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:23Z\\\",\\\"message\\\":\\\"Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1121 13:43:23.558190 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-b9v8b in node crc\\\\nI1121 13:43:23.558832 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-b9v8b after 0 failed attempt(s)\\\\nI1121 13:43:23.558866 6798 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-b9v8b\\\\nI1121 13:43:23.558216 6798 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558892 6798 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558904 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-m5d24 in node crc\\\\nI1121 13:43:23.558913 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-m5d24 after 0 failed attempt(s)\\\\nI1121 13:43:23.558921 6798 default_network_controller.go:776] Recordin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.146225 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.155889 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.155918 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.155926 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.155940 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.155949 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.162607 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.178068 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.191989 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.206771 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.225763 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:40Z\\\",\\\"message\\\":\\\"2025-11-21T13:42:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874\\\\n2025-11-21T13:42:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874 to /host/opt/cni/bin/\\\\n2025-11-21T13:42:55Z [verbose] multus-daemon started\\\\n2025-11-21T13:42:55Z [verbose] Readiness Indicator file check\\\\n2025-11-21T13:43:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.237875 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.249828 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d8
8c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.258618 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.258651 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.258663 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.258679 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.258690 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.262500 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e4ae5c5-33df-4caa-9e92-c7070da4d494\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e8fe024377d7d7d21f5e1826640081a7cada1c3ac389b94deccdc80360afc56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.274613 5133 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.285158 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 
13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.297487 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.306695 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:41Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.361612 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.361663 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.361674 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.361690 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.361701 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.456937 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:41 crc kubenswrapper[5133]: E1121 13:43:41.457109 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.456952 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.456935 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:41 crc kubenswrapper[5133]: E1121 13:43:41.457214 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:41 crc kubenswrapper[5133]: E1121 13:43:41.457474 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.464899 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.465119 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.465313 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.465331 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.465341 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.569212 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.569298 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.569320 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.569357 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.569381 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.671830 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.671882 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.671893 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.671909 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.671920 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.774785 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.774875 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.774901 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.774930 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.774953 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.878415 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.878468 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.878483 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.878502 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.878514 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.980506 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.980547 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.980555 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.980568 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:41 crc kubenswrapper[5133]: I1121 13:43:41.980577 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:41Z","lastTransitionTime":"2025-11-21T13:43:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.027420 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m5d24_0077329a-abad-4c6d-a601-2dc01fd83184/kube-multus/0.log" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.027497 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m5d24" event={"ID":"0077329a-abad-4c6d-a601-2dc01fd83184","Type":"ContainerStarted","Data":"49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.062249 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f
32b14f37d6252bac480dbc38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:23Z\\\",\\\"message\\\":\\\"Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1121 13:43:23.558190 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-b9v8b in node crc\\\\nI1121 13:43:23.558832 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-b9v8b after 0 failed attempt(s)\\\\nI1121 13:43:23.558866 6798 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-b9v8b\\\\nI1121 13:43:23.558216 6798 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558892 6798 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558904 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-m5d24 in node crc\\\\nI1121 13:43:23.558913 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-m5d24 after 0 failed attempt(s)\\\\nI1121 13:43:23.558921 6798 default_network_controller.go:776] Recordin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.080476 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.083345 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.083384 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.083396 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.083410 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.083421 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:42Z","lastTransitionTime":"2025-11-21T13:43:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.101382 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.120742 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.136918 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.153262 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.172877 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:40Z\\\",\\\"message\\\":\\\"2025-11-21T13:42:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874\\\\n2025-11-21T13:42:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874 to /host/opt/cni/bin/\\\\n2025-11-21T13:42:55Z [verbose] multus-daemon started\\\\n2025-11-21T13:42:55Z [verbose] Readiness Indicator file check\\\\n2025-11-21T13:43:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.186212 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.186261 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.186273 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.186295 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.186311 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:42Z","lastTransitionTime":"2025-11-21T13:43:42Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.190528 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.205504 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-control
ler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.217579 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e4ae5c5-33df-4caa-9e92-c7070da4d494\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e8fe024377d7d7d21f5e1826640081a7cada1c3ac389b94deccdc80360afc56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.231539 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.244290 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 
13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.262082 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.274701 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.287375 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.288844 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.288889 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.288903 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.288923 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.288936 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:42Z","lastTransitionTime":"2025-11-21T13:43:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.306527 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.318800 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.333131 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.391948 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.391975 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.391985 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.392012 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.392022 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:42Z","lastTransitionTime":"2025-11-21T13:43:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.457115 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:42 crc kubenswrapper[5133]: E1121 13:43:42.457246 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.477328 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287fa
af92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.494473 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.494546 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.494559 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.494576 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.494589 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:42Z","lastTransitionTime":"2025-11-21T13:43:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.497229 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.513173 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.529126 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.545737 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.571826 5133 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:23Z\\\",\\\"message\\\":\\\"Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1121 13:43:23.558190 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-b9v8b in node crc\\\\nI1121 13:43:23.558832 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-b9v8b after 0 failed attempt(s)\\\\nI1121 13:43:23.558866 6798 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-b9v8b\\\\nI1121 13:43:23.558216 6798 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558892 6798 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558904 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-m5d24 in node crc\\\\nI1121 13:43:23.558913 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-m5d24 after 0 failed attempt(s)\\\\nI1121 13:43:23.558921 6798 default_network_controller.go:776] Recordin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.586101 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.597939 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.598015 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.598028 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.598045 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.598058 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:42Z","lastTransitionTime":"2025-11-21T13:43:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.600292 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.614064 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.630776 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.644068 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.658249 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:40Z\\\",\\\"message\\\":\\\"2025-11-21T13:42:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874\\\\n2025-11-21T13:42:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874 to /host/opt/cni/bin/\\\\n2025-11-21T13:42:55Z [verbose] multus-daemon started\\\\n2025-11-21T13:42:55Z [verbose] Readiness Indicator file check\\\\n2025-11-21T13:43:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.674495 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.685069 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e4ae5c5-33df-4caa-9e92-c7070da4d494\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e8fe024377d7d7d21f5e1826640081a7cada1c3ac389b94deccdc80360afc56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.697118 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.700065 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.700214 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.700275 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.700337 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.700396 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:42Z","lastTransitionTime":"2025-11-21T13:43:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.710027 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.721715 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.733194 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:42Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.802730 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.802760 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.802769 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.802782 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.802819 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:42Z","lastTransitionTime":"2025-11-21T13:43:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.905980 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.906076 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.906094 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.906119 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:42 crc kubenswrapper[5133]: I1121 13:43:42.906138 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:42Z","lastTransitionTime":"2025-11-21T13:43:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.008399 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.008753 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.008762 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.008775 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.008784 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.111839 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.111874 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.111887 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.111906 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.111919 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.214537 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.214632 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.214656 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.214685 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.214706 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.316747 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.316793 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.316804 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.316820 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.316831 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.419071 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.419123 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.419133 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.419146 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.419155 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.457448 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.457503 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:43 crc kubenswrapper[5133]: E1121 13:43:43.457571 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:43 crc kubenswrapper[5133]: E1121 13:43:43.457640 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.457725 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:43 crc kubenswrapper[5133]: E1121 13:43:43.457779 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.522122 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.522416 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.522582 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.522716 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.522814 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.625586 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.625628 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.625639 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.625654 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.625666 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.729086 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.729154 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.729179 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.729208 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.729230 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.831629 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.831707 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.831732 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.831762 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.831785 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.934595 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.934668 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.934680 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.934700 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:43 crc kubenswrapper[5133]: I1121 13:43:43.934717 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:43Z","lastTransitionTime":"2025-11-21T13:43:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.037474 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.037536 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.037549 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.037569 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.037583 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.141171 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.141215 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.141228 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.141244 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.141254 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.244685 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.244747 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.244769 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.244794 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.244811 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.348222 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.348278 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.348303 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.348333 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.348353 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.450755 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.450793 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.450801 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.450817 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.450825 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.457175 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:44 crc kubenswrapper[5133]: E1121 13:43:44.457324 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.553046 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.553095 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.553105 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.553123 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.553144 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.656652 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.656722 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.656744 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.656775 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.656798 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.759549 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.759591 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.759601 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.759619 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.759631 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.861927 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.861978 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.862026 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.862048 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.862062 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.965101 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.965184 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.965207 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.965236 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:44 crc kubenswrapper[5133]: I1121 13:43:44.965258 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:44Z","lastTransitionTime":"2025-11-21T13:43:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.068289 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.068357 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.068380 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.068408 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.068430 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.171348 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.171399 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.171410 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.171427 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.171438 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.273908 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.273972 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.273994 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.274061 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.274084 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.376582 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.376659 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.376676 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.376701 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.376716 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.457282 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh"
Nov 21 13:43:45 crc kubenswrapper[5133]: E1121 13:43:45.457397 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.457421 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.457509 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 21 13:43:45 crc kubenswrapper[5133]: E1121 13:43:45.457578 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 21 13:43:45 crc kubenswrapper[5133]: E1121 13:43:45.457715 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.479657 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.479709 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.479720 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.479737 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.479747 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.582224 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.582742 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.582807 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.582840 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.582862 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.685677 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.685719 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.685729 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.685745 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.685759 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.788567 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.788606 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.788622 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.788637 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.788648 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.872856 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.872921 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.872938 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.872963 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.872986 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:45 crc kubenswrapper[5133]: E1121 13:43:45.891134 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:45Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.895601 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.895631 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.895639 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.895650 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.895659 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:45 crc kubenswrapper[5133]: E1121 13:43:45.909761 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:45Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.914026 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.914066 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.914078 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.914096 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.914108 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:45 crc kubenswrapper[5133]: E1121 13:43:45.929693 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:45Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.934671 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.934725 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.934742 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.934765 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.934780 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:45 crc kubenswrapper[5133]: E1121 13:43:45.956923 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:45Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.962395 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.962474 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.962494 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.962520 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.962538 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:45 crc kubenswrapper[5133]: E1121 13:43:45.982124 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:45Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:45 crc kubenswrapper[5133]: E1121 13:43:45.982295 5133 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.984091 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.984120 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.984131 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.984148 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:45 crc kubenswrapper[5133]: I1121 13:43:45.984159 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:45Z","lastTransitionTime":"2025-11-21T13:43:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.087267 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.087326 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.087341 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.087358 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.087371 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:46Z","lastTransitionTime":"2025-11-21T13:43:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.189568 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.189617 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.189627 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.189643 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.189652 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:46Z","lastTransitionTime":"2025-11-21T13:43:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.295406 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.295455 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.295469 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.295489 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.295506 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:46Z","lastTransitionTime":"2025-11-21T13:43:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.397931 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.397968 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.397978 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.397994 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.398025 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:46Z","lastTransitionTime":"2025-11-21T13:43:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.457312 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:46 crc kubenswrapper[5133]: E1121 13:43:46.457439 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.500605 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.500680 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.500692 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.500709 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.500723 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:46Z","lastTransitionTime":"2025-11-21T13:43:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.607465 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.607501 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.607511 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.607527 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.607536 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:46Z","lastTransitionTime":"2025-11-21T13:43:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.710739 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.710809 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.710826 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.710850 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.710868 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:46Z","lastTransitionTime":"2025-11-21T13:43:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.813830 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.813950 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.813973 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.814024 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.814044 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:46Z","lastTransitionTime":"2025-11-21T13:43:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.917312 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.917362 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.917378 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.917402 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:46 crc kubenswrapper[5133]: I1121 13:43:46.917420 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:46Z","lastTransitionTime":"2025-11-21T13:43:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.035298 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.035366 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.035384 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.035410 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.035429 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.137850 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.137957 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.137978 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.138032 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.138055 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.241385 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.241445 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.241462 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.241490 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.241509 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.344439 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.344518 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.344541 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.344573 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.344596 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.447942 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.448067 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.448107 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.448140 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.448166 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.456534 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.456534 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:47 crc kubenswrapper[5133]: E1121 13:43:47.456772 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.456561 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:47 crc kubenswrapper[5133]: E1121 13:43:47.456879 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:47 crc kubenswrapper[5133]: E1121 13:43:47.457101 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.551317 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.551368 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.551382 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.551404 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.551421 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.654961 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.655049 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.655067 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.655093 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.655111 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.758530 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.758592 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.758608 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.758634 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.758652 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.861819 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.861873 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.861890 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.861908 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.861924 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.965342 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.965407 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.965425 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.965452 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:47 crc kubenswrapper[5133]: I1121 13:43:47.965477 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:47Z","lastTransitionTime":"2025-11-21T13:43:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.068956 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.069053 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.069072 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.069100 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.069121 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:48Z","lastTransitionTime":"2025-11-21T13:43:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.172713 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.172911 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.172935 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.173017 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.173042 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:48Z","lastTransitionTime":"2025-11-21T13:43:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.276517 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.276595 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.276621 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.276654 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.276679 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:48Z","lastTransitionTime":"2025-11-21T13:43:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.380809 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.380888 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.380908 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.380939 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.380960 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:48Z","lastTransitionTime":"2025-11-21T13:43:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.457155 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:48 crc kubenswrapper[5133]: E1121 13:43:48.457315 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.483754 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.483824 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.483840 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.483867 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.483881 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:48Z","lastTransitionTime":"2025-11-21T13:43:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.587205 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.587265 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.587284 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.587307 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.587325 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:48Z","lastTransitionTime":"2025-11-21T13:43:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.689769 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.689847 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.689864 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.689888 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.689905 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:48Z","lastTransitionTime":"2025-11-21T13:43:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.793926 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.794062 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.794093 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.794125 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.794147 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:48Z","lastTransitionTime":"2025-11-21T13:43:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.897613 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.897691 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.897714 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.897746 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:48 crc kubenswrapper[5133]: I1121 13:43:48.897768 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:48Z","lastTransitionTime":"2025-11-21T13:43:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.000756 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.000843 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.000862 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.000917 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.000933 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.103703 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.103762 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.103778 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.103802 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.103863 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.206227 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.206266 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.206274 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.206287 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.206299 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.309255 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.309326 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.309348 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.309380 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.309405 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.413450 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.413522 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.413539 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.413564 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.413584 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.457196 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.457256 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.457371 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:49 crc kubenswrapper[5133]: E1121 13:43:49.457601 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:49 crc kubenswrapper[5133]: E1121 13:43:49.457869 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:49 crc kubenswrapper[5133]: E1121 13:43:49.458112 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.517115 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.517170 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.517187 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.517204 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.517214 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.620389 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.620443 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.620459 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.620484 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.620501 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.723485 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.723558 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.723573 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.723588 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.723599 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.826414 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.826488 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.826515 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.826547 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.826573 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.930050 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.930127 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.930139 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.930159 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:49 crc kubenswrapper[5133]: I1121 13:43:49.930172 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:49Z","lastTransitionTime":"2025-11-21T13:43:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.033519 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.033626 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.033668 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.033704 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.033725 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.136172 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.136228 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.136249 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.136272 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.136287 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.239415 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.239494 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.239512 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.239538 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.239555 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.343917 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.344216 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.344234 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.344260 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.344276 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.448278 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.448337 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.448354 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.448376 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.448396 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.457097 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:50 crc kubenswrapper[5133]: E1121 13:43:50.457316 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.458429 5133 scope.go:117] "RemoveContainer" containerID="e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.551747 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.551811 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.551828 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.551856 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.551883 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.654715 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.654765 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.654783 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.654807 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.654825 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.757259 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.757297 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.757308 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.757324 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.757338 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.860162 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.860233 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.860252 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.860279 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.860323 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.963496 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.963571 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.963594 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.963624 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:50 crc kubenswrapper[5133]: I1121 13:43:50.963648 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:50Z","lastTransitionTime":"2025-11-21T13:43:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.061318 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/2.log" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.065915 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.065982 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.066042 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.066074 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.066100 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.066548 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.067182 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.097449 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 
13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.120264 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.137742 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e4ae5c5-33df-4caa-9e92-c7070da4d494\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e8fe024377d7d7d21f5e1826640081a7cada1c3ac389b94deccdc80360afc56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",
\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.166824 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.168825 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.168878 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.168895 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.168915 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.168930 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.185293 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.198347 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.210376 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.220781 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.228775 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.237790 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.248215 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:40Z\\\",\\\"message\\\":\\\"2025-11-21T13:42:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874\\\\n2025-11-21T13:42:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874 to /host/opt/cni/bin/\\\\n2025-11-21T13:42:55Z [verbose] multus-daemon started\\\\n2025-11-21T13:42:55Z [verbose] Readiness Indicator file check\\\\n2025-11-21T13:43:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.259553 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.271909 5133 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.271972 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.271983 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.272021 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.272038 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.283621 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ccbca4b83db30237624e807299bb17bb84a6621
6f27148373ea648a4c0cc962\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:23Z\\\",\\\"message\\\":\\\"Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1121 13:43:23.558190 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-b9v8b in node crc\\\\nI1121 13:43:23.558832 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-b9v8b after 0 failed attempt(s)\\\\nI1121 13:43:23.558866 6798 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-b9v8b\\\\nI1121 13:43:23.558216 6798 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558892 6798 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558904 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-m5d24 in node crc\\\\nI1121 13:43:23.558913 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-m5d24 after 0 failed attempt(s)\\\\nI1121 13:43:23.558921 6798 default_network_controller.go:776] 
Recordin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\
\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.301442 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.317421 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.330921 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.344859 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.355876 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:51Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.374419 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.374471 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.374487 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.374511 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.374528 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.457146 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.457180 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.457325 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:51 crc kubenswrapper[5133]: E1121 13:43:51.457357 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:51 crc kubenswrapper[5133]: E1121 13:43:51.457565 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:51 crc kubenswrapper[5133]: E1121 13:43:51.457627 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.476554 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.476589 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.476598 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.476610 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.476620 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.579378 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.579425 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.579445 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.579463 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.579475 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.682977 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.683042 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.683054 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.683076 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.683091 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.786679 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.786732 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.786747 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.786765 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.786777 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.893956 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.894080 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.894122 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.894179 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.894206 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.998873 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.998927 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.998939 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.998959 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:51 crc kubenswrapper[5133]: I1121 13:43:51.998975 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:51Z","lastTransitionTime":"2025-11-21T13:43:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.070173 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.070391 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:44:56.070362933 +0000 UTC m=+155.868195211 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.072038 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/3.log" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.072571 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/2.log" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.075598 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" exitCode=1 Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.075637 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.075679 5133 scope.go:117] "RemoveContainer" containerID="e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.076323 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.076464 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.094896 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.103971 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.104037 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.104050 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.104069 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.104082 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:52Z","lastTransitionTime":"2025-11-21T13:43:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.112142 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.125050 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.140706 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.161155 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ccbca4b83db30237624e807299bb17bb84a6621
6f27148373ea648a4c0cc962\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:23Z\\\",\\\"message\\\":\\\"Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1121 13:43:23.558190 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-b9v8b in node crc\\\\nI1121 13:43:23.558832 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-b9v8b after 0 failed attempt(s)\\\\nI1121 13:43:23.558866 6798 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-b9v8b\\\\nI1121 13:43:23.558216 6798 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558892 6798 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558904 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-m5d24 in node crc\\\\nI1121 13:43:23.558913 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-m5d24 after 0 failed attempt(s)\\\\nI1121 13:43:23.558921 6798 default_network_controller.go:776] Recordin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:51Z\\\",\\\"message\\\":\\\"anaged:true include.release.openshift.io/self-managed-high-availability:true service.alpha.openshift.io/serving-cert-secret-name:package-server-manager-serving-cert service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc00717227b \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:8443,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: 
package-server-manager,},ClusterIP:10.217.4.110,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.110],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 13:43:51.638092 7192 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc
-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.171743 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.171806 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.171839 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.171906 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172228 5133 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172309 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:44:56.172291686 +0000 UTC m=+155.970123934 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172364 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172393 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172409 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172419 5133 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172426 5133 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172434 5133 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172393 5133 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172479 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-21 13:44:56.172462761 +0000 UTC m=+155.970295059 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172495 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-21 13:44:56.172488622 +0000 UTC m=+155.970320870 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.172510 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-21 13:44:56.172504212 +0000 UTC m=+155.970336460 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.175326 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.189343 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.201586 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.205776 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.205808 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.205820 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.205836 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.205849 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:52Z","lastTransitionTime":"2025-11-21T13:43:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.216412 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.230253 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.245377 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:40Z\\\",\\\"message\\\":\\\"2025-11-21T13:42:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874\\\\n2025-11-21T13:42:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874 to /host/opt/cni/bin/\\\\n2025-11-21T13:42:55Z [verbose] multus-daemon started\\\\n2025-11-21T13:42:55Z [verbose] Readiness Indicator file check\\\\n2025-11-21T13:43:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.259171 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.272713 5133 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.285980 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e4ae5c5-33df-4caa-9e92-c7070da4d494\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e8fe024377d7d7d21f5e1826640081a7cada1c3ac389b94deccdc80360afc56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef31
8bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.299242 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.308525 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.309213 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.309236 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.309252 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.309265 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:52Z","lastTransitionTime":"2025-11-21T13:43:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.310523 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.322335 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.332083 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.411100 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.411139 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.411148 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.411161 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.411170 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:52Z","lastTransitionTime":"2025-11-21T13:43:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.456656 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:52 crc kubenswrapper[5133]: E1121 13:43:52.457035 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.474398 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1
c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.490940 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e4ae5c5-33df-4caa-9e92-c7070da4d494\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e8fe024377d7d7d21f5e1826640081a7cada1c3ac389b94deccdc80360afc56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.512665 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.514786 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.514878 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.514897 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.514957 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.514977 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:52Z","lastTransitionTime":"2025-11-21T13:43:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.533471 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.554155 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.567051 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.581672 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.602468 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.616820 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.617923 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.617960 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.617970 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.617987 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 
21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.618018 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:52Z","lastTransitionTime":"2025-11-21T13:43:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.631306 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.652898 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxh
l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\"
:\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.682941 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\
\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.698941 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.713456 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.721317 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.721362 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.721374 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.721392 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.721405 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:52Z","lastTransitionTime":"2025-11-21T13:43:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.726382 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.738103 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:40Z\\\",\\\"message\\\":\\\"2025-11-21T13:42:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874\\\\n2025-11-21T13:42:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874 to /host/opt/cni/bin/\\\\n2025-11-21T13:42:55Z [verbose] multus-daemon started\\\\n2025-11-21T13:42:55Z [verbose] Readiness Indicator file check\\\\n2025-11-21T13:43:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.751126 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.769033 5133 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e71d9a11d15c18007af7493702bac6d13cc2845f32b14f37d6252bac480dbc38\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:23Z\\\",\\\"message\\\":\\\"Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.110\\\\\\\", Port:8443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1121 13:43:23.558190 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-b9v8b in node crc\\\\nI1121 13:43:23.558832 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-b9v8b after 0 failed attempt(s)\\\\nI1121 13:43:23.558866 6798 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-b9v8b\\\\nI1121 13:43:23.558216 6798 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558892 6798 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-m5d24\\\\nI1121 13:43:23.558904 6798 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-m5d24 in node crc\\\\nI1121 13:43:23.558913 6798 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-m5d24 after 0 failed attempt(s)\\\\nI1121 13:43:23.558921 6798 default_network_controller.go:776] Recordin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:51Z\\\",\\\"message\\\":\\\"anaged:true 
include.release.openshift.io/self-managed-high-availability:true service.alpha.openshift.io/serving-cert-secret-name:package-server-manager-serving-cert service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc00717227b \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:8443,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: package-server-manager,},ClusterIP:10.217.4.110,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.110],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 13:43:51.638092 7192 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2
a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:52Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.824197 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.824230 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.824238 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.824251 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.824259 5133 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:52Z","lastTransitionTime":"2025-11-21T13:43:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.927066 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.927131 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.927149 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.927175 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:52 crc kubenswrapper[5133]: I1121 13:43:52.927193 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:52Z","lastTransitionTime":"2025-11-21T13:43:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.031208 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.031275 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.031297 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.031329 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.031354 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.083464 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/3.log" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.091358 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:43:53 crc kubenswrapper[5133]: E1121 13:43:53.091697 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.110735 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.130386 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.134733 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.134777 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.134795 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.134819 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.134838 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.147270 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"start
Time\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.162611 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.182310 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.198800 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.216805 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.232613 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.237558 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.237613 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.237628 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.237648 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.237664 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.249799 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:40Z\\\",\\\"message\\\":\\\"2025-11-21T13:42:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874\\\\n2025-11-21T13:42:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874 to /host/opt/cni/bin/\\\\n2025-11-21T13:42:55Z [verbose] multus-daemon started\\\\n2025-11-21T13:42:55Z [verbose] Readiness Indicator file check\\\\n2025-11-21T13:43:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.265196 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.284548 5133 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:51Z\\\",\\\"message\\\":\\\"anaged:true include.release.openshift.io/self-managed-high-availability:true service.alpha.openshift.io/serving-cert-secret-name:package-server-manager-serving-cert service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc00717227b \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:8443,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: package-server-manager,},ClusterIP:10.217.4.110,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.110],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 13:43:51.638092 7192 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.302313 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.322193 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay
.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.335240 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.339684 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.339733 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.339743 5133 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.339756 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.339765 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.352074 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.364568 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":
true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.374765 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.385442 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e4ae5c5-33df-4caa-9e92-c7070da4d494\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e8fe024377d7d7d21f5e1826640081a7cada1c3ac389b94deccdc80360afc56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:53Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.442746 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.442827 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.442854 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.442887 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.442912 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.457327 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.457342 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:53 crc kubenswrapper[5133]: E1121 13:43:53.457486 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:53 crc kubenswrapper[5133]: E1121 13:43:53.457548 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.457350 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:53 crc kubenswrapper[5133]: E1121 13:43:53.457629 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.545654 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.545698 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.545746 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.545778 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.545796 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.648459 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.648511 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.648524 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.648544 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.648557 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.751824 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.752468 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.752492 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.752523 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.752542 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.855142 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.855231 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.855256 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.855286 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.855306 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.958523 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.958573 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.958591 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.958617 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:53 crc kubenswrapper[5133]: I1121 13:43:53.958636 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:53Z","lastTransitionTime":"2025-11-21T13:43:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.062749 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.062821 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.062842 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.062870 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.062892 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.166632 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.166690 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.166708 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.166733 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.166752 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.269995 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.270170 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.270201 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.270232 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.270255 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.373465 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.373533 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.373554 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.373577 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.373596 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.459254 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:54 crc kubenswrapper[5133]: E1121 13:43:54.459460 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.476563 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.476632 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.476649 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.476675 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.476693 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.580160 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.580312 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.580331 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.580354 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.580372 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.682909 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.682984 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.683033 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.683063 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.683082 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.786293 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.786384 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.786412 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.786442 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.786478 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.890124 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.890185 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.890203 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.890229 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.890249 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.991946 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.991983 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.991992 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.992026 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:54 crc kubenswrapper[5133]: I1121 13:43:54.992035 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:54Z","lastTransitionTime":"2025-11-21T13:43:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.096126 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.096253 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.096311 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.096335 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.096356 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:55Z","lastTransitionTime":"2025-11-21T13:43:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.199065 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.199137 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.199155 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.199178 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.199194 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:55Z","lastTransitionTime":"2025-11-21T13:43:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.302846 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.302896 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.302908 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.302929 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.302942 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:55Z","lastTransitionTime":"2025-11-21T13:43:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.406537 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.406606 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.406633 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.406660 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.406677 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:55Z","lastTransitionTime":"2025-11-21T13:43:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.456741 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:55 crc kubenswrapper[5133]: E1121 13:43:55.456975 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.457181 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.457397 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:55 crc kubenswrapper[5133]: E1121 13:43:55.457427 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:55 crc kubenswrapper[5133]: E1121 13:43:55.457824 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.509771 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.509827 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.509844 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.509868 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.509885 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:55Z","lastTransitionTime":"2025-11-21T13:43:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.613530 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.613590 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.613608 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.613631 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.613650 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:55Z","lastTransitionTime":"2025-11-21T13:43:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.717474 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.717517 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.717537 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.717556 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.717569 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:55Z","lastTransitionTime":"2025-11-21T13:43:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.820599 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.820707 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.820726 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.820751 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.820770 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:55Z","lastTransitionTime":"2025-11-21T13:43:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.923803 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.923839 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.923849 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.923863 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:55 crc kubenswrapper[5133]: I1121 13:43:55.923872 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:55Z","lastTransitionTime":"2025-11-21T13:43:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.026381 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.026465 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.026485 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.026513 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.026531 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.129717 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.129794 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.129818 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.130324 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.130544 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.169116 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.169212 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.169251 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.169291 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.169316 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: E1121 13:43:56.187257 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.192667 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.192745 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.192770 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.192803 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.192828 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: E1121 13:43:56.215676 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.220253 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.220317 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.220335 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.220359 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.220377 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: E1121 13:43:56.238433 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.242306 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.242340 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.242352 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.242369 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.242382 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: E1121 13:43:56.261821 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.266984 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.267108 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.267139 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.267194 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.267221 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: E1121 13:43:56.285540 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:43:56Z is after 2025-08-24T17:21:41Z" Nov 21 13:43:56 crc kubenswrapper[5133]: E1121 13:43:56.285788 5133 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.288197 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.288272 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.288290 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.288317 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.288338 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.392839 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.392922 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.392939 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.392968 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.392988 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.457049 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:56 crc kubenswrapper[5133]: E1121 13:43:56.457273 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.495640 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.495702 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.495719 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.495741 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.495760 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.600667 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.600738 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.600764 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.600797 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.600818 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.704957 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.705065 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.705088 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.705115 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.705148 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.811051 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.811136 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.811161 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.811192 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.811217 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.915189 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.915265 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.915289 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.915325 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:56 crc kubenswrapper[5133]: I1121 13:43:56.915348 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:56Z","lastTransitionTime":"2025-11-21T13:43:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.019076 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.019149 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.019172 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.019205 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.019227 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.122706 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.122775 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.122794 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.122822 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.122846 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.226304 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.226366 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.226385 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.226416 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.226445 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.332084 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.332147 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.332165 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.332189 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.332207 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.443436 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.443486 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.443504 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.443529 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.443550 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.457151 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.457250 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.457343 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:57 crc kubenswrapper[5133]: E1121 13:43:57.457476 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:57 crc kubenswrapper[5133]: E1121 13:43:57.457666 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:57 crc kubenswrapper[5133]: E1121 13:43:57.457758 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.557146 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.557224 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.557241 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.557274 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.557336 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.660308 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.660363 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.660376 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.660394 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.660408 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.763907 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.763976 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.764031 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.764066 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.764092 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.868119 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.868198 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.868221 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.868253 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.868278 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.971068 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.971180 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.971203 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.971236 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:57 crc kubenswrapper[5133]: I1121 13:43:57.971282 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:57Z","lastTransitionTime":"2025-11-21T13:43:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.075327 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.075394 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.075410 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.075432 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.075446 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:58Z","lastTransitionTime":"2025-11-21T13:43:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.178457 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.178529 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.178562 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.178592 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.178613 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:58Z","lastTransitionTime":"2025-11-21T13:43:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.283048 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.283111 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.283131 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.283208 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.283229 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:58Z","lastTransitionTime":"2025-11-21T13:43:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.386253 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.386309 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.386327 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.386352 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.386371 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:58Z","lastTransitionTime":"2025-11-21T13:43:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.457396 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:43:58 crc kubenswrapper[5133]: E1121 13:43:58.457577 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.489583 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.489638 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.489650 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.489668 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.489682 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:58Z","lastTransitionTime":"2025-11-21T13:43:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.592749 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.592837 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.592881 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.592923 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.592947 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:58Z","lastTransitionTime":"2025-11-21T13:43:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.695731 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.695795 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.695812 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.695837 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.695855 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:58Z","lastTransitionTime":"2025-11-21T13:43:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.799581 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.799686 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.799713 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.799751 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.799778 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:58Z","lastTransitionTime":"2025-11-21T13:43:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.903441 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.903987 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.904017 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.904036 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:58 crc kubenswrapper[5133]: I1121 13:43:58.904050 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:58Z","lastTransitionTime":"2025-11-21T13:43:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.006581 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.006644 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.006666 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.006697 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.006723 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.110658 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.110733 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.110760 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.110795 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.110818 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.214557 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.214630 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.214652 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.214684 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.214708 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.318827 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.318904 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.318937 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.318966 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.319067 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.422420 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.422506 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.422525 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.422555 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.422573 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.457316 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.457405 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.457325 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:43:59 crc kubenswrapper[5133]: E1121 13:43:59.457568 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:43:59 crc kubenswrapper[5133]: E1121 13:43:59.457687 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:43:59 crc kubenswrapper[5133]: E1121 13:43:59.457834 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.526411 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.526460 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.526470 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.526487 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.526499 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.630588 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.630661 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.630683 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.630712 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.630741 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.734415 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.734493 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.734512 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.734547 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.734574 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.838638 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.838767 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.838792 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.838824 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.838846 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.943302 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.943363 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.943380 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.943404 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:43:59 crc kubenswrapper[5133]: I1121 13:43:59.943420 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:43:59Z","lastTransitionTime":"2025-11-21T13:43:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.047645 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.047718 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.047734 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.047760 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.047777 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.151072 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.151141 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.151161 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.151188 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.151208 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.254596 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.254682 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.254706 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.254738 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.254761 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.359593 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.359687 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.359710 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.359742 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.359762 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.457267 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:00 crc kubenswrapper[5133]: E1121 13:44:00.457509 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.462771 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.462821 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.462844 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.462872 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.462893 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.566102 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.566183 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.566202 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.566228 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.566247 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.668705 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.668799 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.668849 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.668872 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.668891 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.772431 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.772577 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.772600 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.772624 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.772682 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.876189 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.876257 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.876269 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.876285 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.876295 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.979555 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.979623 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.979638 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.979655 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:00 crc kubenswrapper[5133]: I1121 13:44:00.979669 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:00Z","lastTransitionTime":"2025-11-21T13:44:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.083538 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.083615 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.083630 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.083648 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.083662 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:01Z","lastTransitionTime":"2025-11-21T13:44:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.187144 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.187211 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.187229 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.187256 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.187278 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:01Z","lastTransitionTime":"2025-11-21T13:44:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.290851 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.290916 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.290938 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.290971 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.291043 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:01Z","lastTransitionTime":"2025-11-21T13:44:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.395391 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.395466 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.395487 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.395520 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.395539 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:01Z","lastTransitionTime":"2025-11-21T13:44:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.457313 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.457423 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.457456 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:01 crc kubenswrapper[5133]: E1121 13:44:01.457603 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:01 crc kubenswrapper[5133]: E1121 13:44:01.457731 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:01 crc kubenswrapper[5133]: E1121 13:44:01.457878 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.498953 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.499058 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.499120 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.499150 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.499208 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:01Z","lastTransitionTime":"2025-11-21T13:44:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.603502 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.603586 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.603616 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.603660 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.603691 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:01Z","lastTransitionTime":"2025-11-21T13:44:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.707217 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.707281 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.707299 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.707325 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.707342 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:01Z","lastTransitionTime":"2025-11-21T13:44:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.810416 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.810469 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.810486 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.810512 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.810530 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:01Z","lastTransitionTime":"2025-11-21T13:44:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.913394 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.913484 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.913505 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.913534 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:01 crc kubenswrapper[5133]: I1121 13:44:01.913555 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:01Z","lastTransitionTime":"2025-11-21T13:44:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.016987 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.017096 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.017145 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.017171 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.017191 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.120202 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.120265 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.120288 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.120319 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.120341 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.223813 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.223873 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.223890 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.223914 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.223931 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.326194 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.326263 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.326285 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.326318 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.326341 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.429305 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.429379 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.429402 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.429433 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.429456 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.457183 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:02 crc kubenswrapper[5133]: E1121 13:44:02.457362 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.480922 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.510443 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pvdwc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87822156-53e8-4eb5-b241-db506a21a1b9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dc374f8a6deccf60de941df327adc9f29d951a10746bd754c0d4f5573a141a71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jzt65\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pvdwc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.533939 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.533989 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.534036 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.534072 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.534092 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.551240 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c335cd8-618b-4871-a0e2-deaa61ddc49a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b5f6320dbfb8d910e52de319fe5350b435c1c9f00a2e1d5b2b953fb6d1688984\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://273a23deb0bee7d80bc12f28a4056a5b843e81cc7c411273e49c3aa0fdba5182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c93dddad8f7a853e1302ba96f3fc6d6626b22de64c8cfd1ee63996820d0816cd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebb7634a6507b2323d36c3d57b19c374862e0bada0e81150da9db315e5812f12\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.572103 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1b5e12d17b3e683349818698223816569514a9f4ae5d14ba1f5661c472fce39\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.586322 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bj52j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f9cc533c-2914-45d2-97b4-d6e35361450d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b1cfd49e0e5564696bd26f92acb10ca3430f81dc3f690a51ecd7bfa14876bccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zdp6q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:55Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bj52j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.599907 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b3aabda0-97d9-4886-8909-1c423c4d3238\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:07Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p8kp7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:07Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-x5wnh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.621770 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"373d5da7-fae9-4689-9ede-6e2d69a54c02\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ccbca4b83db30237624e807299bb17bb84a6621
6f27148373ea648a4c0cc962\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:51Z\\\",\\\"message\\\":\\\"anaged:true include.release.openshift.io/self-managed-high-availability:true service.alpha.openshift.io/serving-cert-secret-name:package-server-manager-serving-cert service.alpha.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168 service.beta.openshift.io/serving-cert-signed-by:openshift-service-serving-signer@1740288168] [{config.openshift.io/v1 ClusterVersion version 9101b518-476b-4eea-8fa6-69b0534e5caa 0xc00717227b \\\\u003cnil\\\\u003e}] [] []},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:metrics,Protocol:TCP,Port:8443,TargetPort:{1 0 metrics},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: package-server-manager,},ClusterIP:10.217.4.110,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.110],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nF1121 13:43:51.638092 7192 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lr2l4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-tjzm8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.637645 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.637711 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.637729 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.637755 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.637775 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.642949 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0ec3a98-4d89-4f36-a79e-ac65da8672ca\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://933bd113027e6a7ee2eba33df125dcdcf389c21b99c7dd32bc654edaf7278e2a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79452c0417a3dea89f6c383f5ae529411381ac6c58dec393be6833be2cd8d848\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://775733ba3a7c5210b2e44d2a607ceed21c007af8728a1b5157750cb942a56c50\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6d21a06ad72a199d989c29326725c9d49df6fbb9fc6e9d54bcd1f7bb89c78b02\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7216da1e17ab61329ca71325b40fdbd040dbf83f072d565302a571acb1313e53\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a466e70f0711cd7d64221f62309d99dfa9e4b9910a69b2397240cfdf244578\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1a6a968fb55017d7e2fee878bb4d50904766820de016afd8f01de3bbd92b2421\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:43:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:43:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wqxhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:53Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-b9v8b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.661301 5133 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0f826000-be5b-4f8f-bdc5-b80e11bb5e65\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cac876542527f108f89313704d6275aed6b735176f7f38b0fccbfcd79fdbf6e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aa9a560543d545bd50cbb9aa0e907a992f9b3afb36de7ec5e72010dd835d2574\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d2c86055d41460f757efc29eaa62834faf3f14f9ca5ba534479d0fcd0a43d3bd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bca102c9301a0963f4be54906a6ca418a585d0aa6b063a0512c2a334928f0d88\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5c3a46b403b7e6dab559f6fccde4e7f3fa2abc837c8323745aff9a8ea03dfd73\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-21T13:42:47Z\\\",\\\"message\\\":\\\"le observer\\\\nW1121 13:42:47.565555 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1121 13:42:47.567527 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1121 13:42:47.569658 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3912986073/tls.crt::/tmp/serving-cert-3912986073/tls.key\\\\\\\"\\\\nI1121 13:42:47.852533 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1121 13:42:47.856751 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1121 13:42:47.856781 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1121 13:42:47.856814 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1121 13:42:47.856821 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1121 13:42:47.862211 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1121 13:42:47.862280 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862290 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1121 13:42:47.862309 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1121 13:42:47.862319 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1121 13:42:47.862326 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1121 13:42:47.862333 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1121 13:42:47.863057 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1121 13:42:47.865438 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:32Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc6b16c41d8bc248fc4de65102a71d3875d1ab768432f61581605fa487ebfc9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2865bc7a7431a9bf5b413310584ac39b94cfeb9785ec679542bb036255c57b57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.682768 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:49Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ede95ef8b82acda5cadd081a37fcb2a35fab8269c7ec403bb33a6feb8bf9eb88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3037d1f01bc9704cae9aa3eb4760e4dc737b1990a6ae5a007d3ec412efad85a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.703431 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.718518 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:51Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1236e5c5c7c8db59fd2faa688e9b781fe94721cc8aa644dd9ab91df2684c617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.739663 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-m5d24" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0077329a-abad-4c6d-a601-2dc01fd83184\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-21T13:43:40Z\\\",\\\"message\\\":\\\"2025-11-21T13:42:55+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874\\\\n2025-11-21T13:42:55+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_479ae883-3ec6-417b-ad49-d387b81ff874 to /host/opt/cni/bin/\\\\n2025-11-21T13:42:55Z [verbose] multus-daemon started\\\\n2025-11-21T13:42:55Z [verbose] Readiness Indicator file check\\\\n2025-11-21T13:43:40Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmd8c\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod \"openshift-multus\"/\"multus-m5d24\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.740840 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.740895 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.740912 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.740940 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.740958 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.756726 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"52f5a729-05d1-4f84-a216-1df3233af57d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c46519115b067feef9d8fb5783b8b9420bf99d97515021a7d389e6cdf1d64112\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4gnvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:52Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-xxlvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.772953 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75a51560-1657-43fa-880f-bece0b75e088\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f1dc13dd497fa4611689b3c047c602511745dbff2b9a797f8ea7316046531717\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ca37cc062dd8763ac13b5a07fcd06f2997b1ff9cfe38d9a9ff3091980d679932\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57552cf42d0ae179e9c1c67120613da40995fbe308e06dbd466d0f71167142b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-control
ler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f00c6fbd3ed9ce5e2787b591f39bd80828b96dd53f15696adfe62c3df47ed47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.788274 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4e4ae5c5-33df-4caa-9e92-c7070da4d494\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e8fe024377d7d7d21f5e1826640081a7cada1c3ac389b94deccdc80360afc56\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:42:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e019adb31c6276b82ca39edac3f8d20a563baeb5bc34dbd64df79c9ae690b5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-21T13:42:26Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-21T13:42:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:42:23Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.807227 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-21T13:42:48Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.822076 5133 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2c337e24-9cef-4932-92ae-5a175379c77a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-21T13:43:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0eeb4cc8e3340d9eb9710ca7b55244d4055d6bc7cc31a6d227f233988b242823\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc648165e145d8ed6a43aff3ffb558380dc55ffd03816922b643bf7f740088fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-21T13:43:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wrzcz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-21T13:43:05Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-7vfdg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:02Z is after 2025-08-24T17:21:41Z" Nov 21 
13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.843323 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.843429 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.843456 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.843478 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.843493 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.947101 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.947175 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.947200 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.947233 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:02 crc kubenswrapper[5133]: I1121 13:44:02.947255 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:02Z","lastTransitionTime":"2025-11-21T13:44:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.050534 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.050896 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.051077 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.051235 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.051365 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.154248 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.154314 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.154332 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.154358 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.154377 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.257749 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.257832 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.257857 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.257889 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.257914 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.361512 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.361601 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.361651 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.361679 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.361696 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.456704 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.456733 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.456814 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:03 crc kubenswrapper[5133]: E1121 13:44:03.456892 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:03 crc kubenswrapper[5133]: E1121 13:44:03.457102 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:03 crc kubenswrapper[5133]: E1121 13:44:03.457272 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.465260 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.465388 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.465405 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.465427 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.465443 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.568064 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.568135 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.568156 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.568185 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.568207 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.671099 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.671141 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.671154 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.671174 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.671190 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.773382 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.773447 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.773464 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.773488 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.773504 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.876368 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.876441 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.876466 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.876503 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.876528 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.980043 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.980102 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.980119 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.980141 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:03 crc kubenswrapper[5133]: I1121 13:44:03.980158 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:03Z","lastTransitionTime":"2025-11-21T13:44:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.083378 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.083435 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.083451 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.083475 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.083493 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:04Z","lastTransitionTime":"2025-11-21T13:44:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.186647 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.187042 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.187062 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.187116 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.187135 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:04Z","lastTransitionTime":"2025-11-21T13:44:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.290042 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.290110 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.290148 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.290183 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.290207 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:04Z","lastTransitionTime":"2025-11-21T13:44:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.392855 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.392965 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.392986 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.393060 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.393086 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:04Z","lastTransitionTime":"2025-11-21T13:44:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.457557 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:04 crc kubenswrapper[5133]: E1121 13:44:04.457744 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.496506 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.496598 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.496621 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.496651 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.496674 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:04Z","lastTransitionTime":"2025-11-21T13:44:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.599901 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.599969 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.599993 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.600080 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.600105 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:04Z","lastTransitionTime":"2025-11-21T13:44:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.702922 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.703036 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.703057 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.703087 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.703111 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:04Z","lastTransitionTime":"2025-11-21T13:44:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.805504 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.805566 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.805588 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.805614 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.805636 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:04Z","lastTransitionTime":"2025-11-21T13:44:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.909386 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.909457 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.909545 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.909581 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:04 crc kubenswrapper[5133]: I1121 13:44:04.909607 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:04Z","lastTransitionTime":"2025-11-21T13:44:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.012617 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.012697 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.012729 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.012760 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.012781 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.116068 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.116134 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.116156 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.116184 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.116209 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.219446 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.219503 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.219519 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.219542 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.219559 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.322465 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.322520 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.322536 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.322559 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.322576 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.425299 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.425360 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.425381 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.425411 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.425433 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.457170 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.457222 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:05 crc kubenswrapper[5133]: E1121 13:44:05.457394 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.457436 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:05 crc kubenswrapper[5133]: E1121 13:44:05.457565 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:05 crc kubenswrapper[5133]: E1121 13:44:05.457667 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.528808 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.528859 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.528871 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.528888 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.528901 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.632616 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.632676 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.632697 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.632718 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.632732 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.735871 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.735922 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.735974 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.736030 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.736049 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.839781 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.839838 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.839855 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.839879 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.839929 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.943944 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.944050 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.944075 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.944109 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:05 crc kubenswrapper[5133]: I1121 13:44:05.944132 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:05Z","lastTransitionTime":"2025-11-21T13:44:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.047507 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.047949 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.048189 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.048348 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.048495 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.151234 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.151631 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.151865 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.152163 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.152357 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.255593 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.256049 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.256229 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.256373 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.256514 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.319324 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.319388 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.319416 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.319443 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.319463 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: E1121 13:44:06.340186 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.345472 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.345700 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.345940 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.346195 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.346443 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: E1121 13:44:06.368248 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.373909 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.373966 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.373985 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.374038 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.374060 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: E1121 13:44:06.394863 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.400097 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.400179 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.400197 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.400240 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.400256 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: E1121 13:44:06.421944 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.428959 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.429156 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.429267 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.429365 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.429469 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: E1121 13:44:06.447045 5133 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-21T13:44:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"eb1f691e-5306-40d5-9666-4e51161aa15a\\\",\\\"systemUUID\\\":\\\"537cb059-79e6-48e5-b353-57bb495db8a2\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-21T13:44:06Z is after 2025-08-24T17:21:41Z" Nov 21 13:44:06 crc kubenswrapper[5133]: E1121 13:44:06.447797 5133 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.449993 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.450143 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.450215 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.450293 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.450360 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.457516 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:06 crc kubenswrapper[5133]: E1121 13:44:06.457697 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.553363 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.553429 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.553447 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.553473 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.553490 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.657069 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.657142 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.657165 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.657195 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.657219 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.760077 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.760480 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.760632 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.760786 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.760936 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.863619 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.863688 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.863706 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.863732 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.863788 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.965612 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.965864 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.965938 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.966064 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:06 crc kubenswrapper[5133]: I1121 13:44:06.966146 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:06Z","lastTransitionTime":"2025-11-21T13:44:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.069498 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.069545 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.069557 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.069578 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.069591 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:07Z","lastTransitionTime":"2025-11-21T13:44:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.173576 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.173637 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.173653 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.173679 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.173697 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:07Z","lastTransitionTime":"2025-11-21T13:44:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.278952 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.279160 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.279193 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.279265 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.279287 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:07Z","lastTransitionTime":"2025-11-21T13:44:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.382478 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.382531 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.382549 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.382582 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.382605 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:07Z","lastTransitionTime":"2025-11-21T13:44:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.457730 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.457881 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:07 crc kubenswrapper[5133]: E1121 13:44:07.458076 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.458223 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:07 crc kubenswrapper[5133]: E1121 13:44:07.458329 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:07 crc kubenswrapper[5133]: E1121 13:44:07.458608 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.485970 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.486036 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.486045 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.486063 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.486074 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:07Z","lastTransitionTime":"2025-11-21T13:44:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.590113 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.590194 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.590212 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.590237 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.590257 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:07Z","lastTransitionTime":"2025-11-21T13:44:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.693650 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.693729 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.693751 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.693779 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.693807 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:07Z","lastTransitionTime":"2025-11-21T13:44:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.797418 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.797476 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.797492 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.797513 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.797528 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:07Z","lastTransitionTime":"2025-11-21T13:44:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.900450 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.900498 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.900510 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.900528 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:07 crc kubenswrapper[5133]: I1121 13:44:07.900541 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:07Z","lastTransitionTime":"2025-11-21T13:44:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.003993 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.004083 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.004105 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.004137 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.004158 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.107248 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.107327 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.107352 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.107384 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.107407 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.209935 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.209989 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.210033 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.210055 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.210075 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.312669 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.312719 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.312737 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.312758 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.312778 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.415600 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.415664 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.415686 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.415720 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.415742 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.457535 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:08 crc kubenswrapper[5133]: E1121 13:44:08.458470 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.458884 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:44:08 crc kubenswrapper[5133]: E1121 13:44:08.459247 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.485487 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.519278 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.519322 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.519333 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.519351 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.519363 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.622433 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.622491 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.622507 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.622529 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.622545 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.725643 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.725691 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.725703 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.725720 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.725729 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.828224 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.828269 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.828285 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.828306 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.828321 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.931416 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.931469 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.931480 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.931498 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:08 crc kubenswrapper[5133]: I1121 13:44:08.931511 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:08Z","lastTransitionTime":"2025-11-21T13:44:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.033930 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.034045 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.034070 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.034100 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.034126 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.137473 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.137554 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.137575 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.137606 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.137630 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.241575 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.241679 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.241708 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.241753 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.241783 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.345454 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.345537 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.345554 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.345576 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.345590 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.448560 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.448612 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.448624 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.448642 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.448654 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.456817 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.456828 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.457034 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:09 crc kubenswrapper[5133]: E1121 13:44:09.457135 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:09 crc kubenswrapper[5133]: E1121 13:44:09.457218 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:09 crc kubenswrapper[5133]: E1121 13:44:09.457302 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.552844 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.552894 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.552912 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.552936 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.552954 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.654909 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.654952 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.654963 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.654981 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.654992 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.758106 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.758183 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.758199 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.758216 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.758228 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.861029 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.861084 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.861101 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.861118 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.861131 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.964045 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.964105 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.964126 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.964151 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:09 crc kubenswrapper[5133]: I1121 13:44:09.964169 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:09Z","lastTransitionTime":"2025-11-21T13:44:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.068099 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.068181 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.068204 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.068234 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.068256 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.170717 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.170751 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.170759 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.170772 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.170783 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.273955 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.274071 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.274088 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.274105 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.274145 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.377518 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.377601 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.377621 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.377649 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.377668 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.457617 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:10 crc kubenswrapper[5133]: E1121 13:44:10.457817 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.479564 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.479599 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.479607 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.479618 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.479628 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.582372 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.582457 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.582486 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.582519 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.582539 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.685592 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.685677 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.685707 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.685748 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.685769 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.789274 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.789346 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.789365 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.789390 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.789407 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.891648 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.891706 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.891725 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.891746 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.891758 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.995428 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.995494 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.995506 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.995523 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:10 crc kubenswrapper[5133]: I1121 13:44:10.995535 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:10Z","lastTransitionTime":"2025-11-21T13:44:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.099098 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.099168 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.099188 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.099214 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.099232 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:11Z","lastTransitionTime":"2025-11-21T13:44:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.202788 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.202863 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.202882 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.202907 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.202928 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:11Z","lastTransitionTime":"2025-11-21T13:44:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.229213 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:11 crc kubenswrapper[5133]: E1121 13:44:11.229392 5133 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:44:11 crc kubenswrapper[5133]: E1121 13:44:11.229472 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs podName:b3aabda0-97d9-4886-8909-1c423c4d3238 nodeName:}" failed. No retries permitted until 2025-11-21 13:45:15.229449873 +0000 UTC m=+175.027282161 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs") pod "network-metrics-daemon-x5wnh" (UID: "b3aabda0-97d9-4886-8909-1c423c4d3238") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.305862 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.305909 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.305926 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.305948 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.305968 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:11Z","lastTransitionTime":"2025-11-21T13:44:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.409017 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.409072 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.409085 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.409108 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.409122 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:11Z","lastTransitionTime":"2025-11-21T13:44:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.457241 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.457361 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.457375 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:11 crc kubenswrapper[5133]: E1121 13:44:11.457509 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:11 crc kubenswrapper[5133]: E1121 13:44:11.457617 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:11 crc kubenswrapper[5133]: E1121 13:44:11.457819 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.511762 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.511861 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.511887 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.511918 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.511937 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:11Z","lastTransitionTime":"2025-11-21T13:44:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.614909 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.614959 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.614973 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.614993 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.615038 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:11Z","lastTransitionTime":"2025-11-21T13:44:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.718579 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.718625 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.718637 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.718654 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.718666 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:11Z","lastTransitionTime":"2025-11-21T13:44:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.821899 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.822312 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.822490 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.822665 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.822803 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:11Z","lastTransitionTime":"2025-11-21T13:44:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.927437 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.927505 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.927521 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.927543 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:11 crc kubenswrapper[5133]: I1121 13:44:11.927560 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:11Z","lastTransitionTime":"2025-11-21T13:44:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.030558 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.031055 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.031122 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.031207 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.031289 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.134831 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.134909 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.134931 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.134958 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.134976 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.238626 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.238698 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.238721 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.238754 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.238779 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.341754 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.341817 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.341836 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.341862 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.341882 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.444905 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.445143 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.445171 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.445208 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.445239 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.457365 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:12 crc kubenswrapper[5133]: E1121 13:44:12.457599 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.522784 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=82.522745361 podStartE2EDuration="1m22.522745361s" podCreationTimestamp="2025-11-21 13:42:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.521426505 +0000 UTC m=+112.319258793" watchObservedRunningTime="2025-11-21 13:44:12.522745361 +0000 UTC m=+112.320577649" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.551482 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.551535 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.551547 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.551567 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.551580 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.587422 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-bj52j" podStartSLOduration=80.587399177 podStartE2EDuration="1m20.587399177s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.568397574 +0000 UTC m=+112.366229832" watchObservedRunningTime="2025-11-21 13:44:12.587399177 +0000 UTC m=+112.385231435" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.607176 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-m5d24" podStartSLOduration=80.607163391 podStartE2EDuration="1m20.607163391s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.606413511 +0000 UTC m=+112.404245759" watchObservedRunningTime="2025-11-21 13:44:12.607163391 +0000 UTC m=+112.404995639" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.649789 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podStartSLOduration=80.649763652 podStartE2EDuration="1m20.649763652s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.623354208 +0000 UTC m=+112.421186456" watchObservedRunningTime="2025-11-21 13:44:12.649763652 +0000 UTC m=+112.447595900" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.655137 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.655170 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.655182 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.655198 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.655208 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.673842 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-b9v8b" podStartSLOduration=80.673823471 podStartE2EDuration="1m20.673823471s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.6726722 +0000 UTC m=+112.470504448" watchObservedRunningTime="2025-11-21 13:44:12.673823471 +0000 UTC m=+112.471655719" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.698256 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=84.698235381 podStartE2EDuration="1m24.698235381s" podCreationTimestamp="2025-11-21 13:42:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.697908562 +0000 UTC m=+112.495740820" watchObservedRunningTime="2025-11-21 13:44:12.698235381 +0000 UTC m=+112.496067639" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.757602 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.757649 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.757660 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.757678 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.757690 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.794061 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=53.793978247 podStartE2EDuration="53.793978247s" podCreationTimestamp="2025-11-21 13:43:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.793627197 +0000 UTC m=+112.591459445" watchObservedRunningTime="2025-11-21 13:44:12.793978247 +0000 UTC m=+112.591810485" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.794262 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-7vfdg" podStartSLOduration=79.794256694 podStartE2EDuration="1m19.794256694s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.777392489 +0000 UTC m=+112.575224737" watchObservedRunningTime="2025-11-21 13:44:12.794256694 +0000 UTC m=+112.592088942" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.807348 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=35.807330217 podStartE2EDuration="35.807330217s" podCreationTimestamp="2025-11-21 13:43:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.807017189 +0000 UTC m=+112.604849437" watchObservedRunningTime="2025-11-21 13:44:12.807330217 +0000 UTC m=+112.605162465" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.832096 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=4.832080676 podStartE2EDuration="4.832080676s" podCreationTimestamp="2025-11-21 13:44:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.831540551 +0000 UTC m=+112.629372829" watchObservedRunningTime="2025-11-21 13:44:12.832080676 +0000 UTC m=+112.629912924" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.855290 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-pvdwc" podStartSLOduration=80.855272222 podStartE2EDuration="1m20.855272222s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:12.854533472 +0000 UTC m=+112.652365720" watchObservedRunningTime="2025-11-21 13:44:12.855272222 +0000 UTC m=+112.653104470" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.860057 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.860105 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.860114 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.860129 5133 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.860139 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.961946 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.961975 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.961984 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.962000 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:12 crc kubenswrapper[5133]: I1121 13:44:12.962010 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:12Z","lastTransitionTime":"2025-11-21T13:44:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.064376 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.064668 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.064768 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.064860 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.064958 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.167194 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.167234 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.167247 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.167261 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.167273 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.270169 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.270244 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.270264 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.270290 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.270308 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.373673 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.373765 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.373789 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.373814 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.373832 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.457133 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.457197 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.457134 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:13 crc kubenswrapper[5133]: E1121 13:44:13.457281 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:13 crc kubenswrapper[5133]: E1121 13:44:13.457379 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:13 crc kubenswrapper[5133]: E1121 13:44:13.457476 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.475871 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.475917 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.475929 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.475947 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.475961 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.578604 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.578661 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.578679 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.578704 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.578726 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.680513 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.680554 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.680565 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.680582 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.680594 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.783493 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.783540 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.783551 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.783577 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.783590 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.885683 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.885750 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.885770 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.885798 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.885821 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.988731 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.988785 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.988797 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.988818 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:13 crc kubenswrapper[5133]: I1121 13:44:13.988834 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:13Z","lastTransitionTime":"2025-11-21T13:44:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.092040 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.092099 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.092112 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.092131 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.092144 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:14Z","lastTransitionTime":"2025-11-21T13:44:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.197099 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.197172 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.197187 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.197212 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.197236 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:14Z","lastTransitionTime":"2025-11-21T13:44:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.299859 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.299957 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.299976 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.300032 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.300052 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:14Z","lastTransitionTime":"2025-11-21T13:44:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.403071 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.403143 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.403163 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.403201 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.403221 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:14Z","lastTransitionTime":"2025-11-21T13:44:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.456556 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:14 crc kubenswrapper[5133]: E1121 13:44:14.456751 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.505936 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.505991 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.506068 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.506099 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.506122 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:14Z","lastTransitionTime":"2025-11-21T13:44:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.609363 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.609481 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.609500 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.609520 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.609536 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:14Z","lastTransitionTime":"2025-11-21T13:44:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.712766 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.712841 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.712866 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.712899 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.712924 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:14Z","lastTransitionTime":"2025-11-21T13:44:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.816193 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.816246 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.816258 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.816277 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.816291 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:14Z","lastTransitionTime":"2025-11-21T13:44:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.918705 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.918746 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.918757 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.918771 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:14 crc kubenswrapper[5133]: I1121 13:44:14.918782 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:14Z","lastTransitionTime":"2025-11-21T13:44:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.021832 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.021865 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.021873 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.021897 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.021907 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.125373 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.125415 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.125428 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.125449 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.125469 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.227853 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.227907 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.227919 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.227936 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.227950 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.330605 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.330647 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.330659 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.330675 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.330686 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.434514 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.434593 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.434612 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.434635 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.434650 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.457580 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.457701 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:15 crc kubenswrapper[5133]: E1121 13:44:15.457806 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.457815 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:15 crc kubenswrapper[5133]: E1121 13:44:15.457936 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:15 crc kubenswrapper[5133]: E1121 13:44:15.458190 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.538428 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.538485 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.538500 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.538524 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.538537 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.641933 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.641983 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.642046 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.642068 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.642083 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.745790 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.745851 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.745879 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.745912 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.745935 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.850618 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.850712 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.850731 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.850761 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.850780 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.954277 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.954359 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.954386 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.954416 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:15 crc kubenswrapper[5133]: I1121 13:44:15.954441 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:15Z","lastTransitionTime":"2025-11-21T13:44:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.057471 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.057527 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.057542 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.057563 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.057576 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:16Z","lastTransitionTime":"2025-11-21T13:44:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.160688 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.160778 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.160811 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.160848 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.160870 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:16Z","lastTransitionTime":"2025-11-21T13:44:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.264034 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.264099 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.264116 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.264143 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.264163 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:16Z","lastTransitionTime":"2025-11-21T13:44:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.367199 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.367264 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.367276 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.367298 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.367311 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:16Z","lastTransitionTime":"2025-11-21T13:44:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.456710 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:16 crc kubenswrapper[5133]: E1121 13:44:16.456970 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.470154 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.470202 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.470251 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.470274 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.470290 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:16Z","lastTransitionTime":"2025-11-21T13:44:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.574161 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.574242 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.574265 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.574293 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.574311 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:16Z","lastTransitionTime":"2025-11-21T13:44:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.675651 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.675732 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.675752 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.675779 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.675800 5133 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-21T13:44:16Z","lastTransitionTime":"2025-11-21T13:44:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.742379 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh"] Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.743559 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.746627 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.746911 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.747787 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.751305 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.794187 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.794426 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.794471 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.794541 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.794587 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.895214 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 
13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.895279 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.895308 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.895354 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.895390 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.895503 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.895640 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.896424 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-service-ca\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.909724 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:16 crc kubenswrapper[5133]: I1121 13:44:16.917211 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/2706fe88-a992-42ad-81e7-8ca7f3ff7e13-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-kpxqh\" (UID: \"2706fe88-a992-42ad-81e7-8ca7f3ff7e13\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:17 crc kubenswrapper[5133]: I1121 13:44:17.078256 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" Nov 21 13:44:17 crc kubenswrapper[5133]: I1121 13:44:17.179939 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" event={"ID":"2706fe88-a992-42ad-81e7-8ca7f3ff7e13","Type":"ContainerStarted","Data":"a35bcfa399b89c06d5b4ed52cd90ad75cd7b4d012624b16b54864097dc364a53"} Nov 21 13:44:17 crc kubenswrapper[5133]: I1121 13:44:17.457369 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:17 crc kubenswrapper[5133]: I1121 13:44:17.457395 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:17 crc kubenswrapper[5133]: E1121 13:44:17.458133 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:17 crc kubenswrapper[5133]: I1121 13:44:17.457566 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:17 crc kubenswrapper[5133]: E1121 13:44:17.458338 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:17 crc kubenswrapper[5133]: E1121 13:44:17.458466 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:18 crc kubenswrapper[5133]: I1121 13:44:18.184905 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" event={"ID":"2706fe88-a992-42ad-81e7-8ca7f3ff7e13","Type":"ContainerStarted","Data":"7e3be3bf793554a0aaa5288f4fae958c0e00151730d94a7f087695f2d3dc2ed0"} Nov 21 13:44:18 crc kubenswrapper[5133]: I1121 13:44:18.457275 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:18 crc kubenswrapper[5133]: E1121 13:44:18.457517 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:19 crc kubenswrapper[5133]: I1121 13:44:19.457186 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:19 crc kubenswrapper[5133]: I1121 13:44:19.457186 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:19 crc kubenswrapper[5133]: I1121 13:44:19.457262 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:19 crc kubenswrapper[5133]: I1121 13:44:19.458529 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:44:19 crc kubenswrapper[5133]: E1121 13:44:19.458639 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:19 crc kubenswrapper[5133]: E1121 13:44:19.458712 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:19 crc kubenswrapper[5133]: E1121 13:44:19.458537 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:19 crc kubenswrapper[5133]: E1121 13:44:19.458812 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:44:20 crc kubenswrapper[5133]: I1121 13:44:20.457089 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:20 crc kubenswrapper[5133]: E1121 13:44:20.457282 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:21 crc kubenswrapper[5133]: I1121 13:44:21.456746 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:21 crc kubenswrapper[5133]: I1121 13:44:21.456792 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:21 crc kubenswrapper[5133]: I1121 13:44:21.456811 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:21 crc kubenswrapper[5133]: E1121 13:44:21.456902 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:21 crc kubenswrapper[5133]: E1121 13:44:21.457058 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:21 crc kubenswrapper[5133]: E1121 13:44:21.457159 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:22 crc kubenswrapper[5133]: E1121 13:44:22.282811 5133 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 21 13:44:22 crc kubenswrapper[5133]: I1121 13:44:22.457421 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:22 crc kubenswrapper[5133]: E1121 13:44:22.459710 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:22 crc kubenswrapper[5133]: E1121 13:44:22.948703 5133 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 13:44:23 crc kubenswrapper[5133]: I1121 13:44:23.456840 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:23 crc kubenswrapper[5133]: I1121 13:44:23.456898 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:23 crc kubenswrapper[5133]: I1121 13:44:23.456853 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:23 crc kubenswrapper[5133]: E1121 13:44:23.457081 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:23 crc kubenswrapper[5133]: E1121 13:44:23.457334 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:23 crc kubenswrapper[5133]: E1121 13:44:23.457447 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:24 crc kubenswrapper[5133]: I1121 13:44:24.456854 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:24 crc kubenswrapper[5133]: E1121 13:44:24.457206 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:25 crc kubenswrapper[5133]: I1121 13:44:25.457499 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:25 crc kubenswrapper[5133]: I1121 13:44:25.457530 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:25 crc kubenswrapper[5133]: I1121 13:44:25.457495 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:25 crc kubenswrapper[5133]: E1121 13:44:25.457775 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:25 crc kubenswrapper[5133]: E1121 13:44:25.457647 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:25 crc kubenswrapper[5133]: E1121 13:44:25.458044 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:26 crc kubenswrapper[5133]: I1121 13:44:26.456972 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:26 crc kubenswrapper[5133]: E1121 13:44:26.457214 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.220506 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m5d24_0077329a-abad-4c6d-a601-2dc01fd83184/kube-multus/1.log" Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.221730 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m5d24_0077329a-abad-4c6d-a601-2dc01fd83184/kube-multus/0.log" Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.221810 5133 generic.go:334] "Generic (PLEG): container finished" podID="0077329a-abad-4c6d-a601-2dc01fd83184" containerID="49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595" exitCode=1 Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.221878 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m5d24" event={"ID":"0077329a-abad-4c6d-a601-2dc01fd83184","Type":"ContainerDied","Data":"49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595"} Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.221945 5133 scope.go:117] "RemoveContainer" containerID="24fe246ff402a8854ee5e55ccc507a2e497dbb2cdfed3f0f8b380f00b9436661" Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.222853 5133 scope.go:117] "RemoveContainer" containerID="49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595" Nov 21 13:44:27 crc kubenswrapper[5133]: E1121 13:44:27.223457 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-m5d24_openshift-multus(0077329a-abad-4c6d-a601-2dc01fd83184)\"" pod="openshift-multus/multus-m5d24" podUID="0077329a-abad-4c6d-a601-2dc01fd83184" Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.250542 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-kpxqh" podStartSLOduration=95.250508737 podStartE2EDuration="1m35.250508737s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:18.205926252 +0000 UTC m=+118.003758540" watchObservedRunningTime="2025-11-21 13:44:27.250508737 +0000 UTC m=+127.048341025" Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.457415 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.457415 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:27 crc kubenswrapper[5133]: E1121 13:44:27.457699 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:27 crc kubenswrapper[5133]: I1121 13:44:27.457438 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:27 crc kubenswrapper[5133]: E1121 13:44:27.457908 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:27 crc kubenswrapper[5133]: E1121 13:44:27.458027 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:27 crc kubenswrapper[5133]: E1121 13:44:27.950140 5133 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 13:44:28 crc kubenswrapper[5133]: I1121 13:44:28.228630 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m5d24_0077329a-abad-4c6d-a601-2dc01fd83184/kube-multus/1.log" Nov 21 13:44:28 crc kubenswrapper[5133]: I1121 13:44:28.456868 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:28 crc kubenswrapper[5133]: E1121 13:44:28.457039 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:29 crc kubenswrapper[5133]: I1121 13:44:29.457158 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:29 crc kubenswrapper[5133]: I1121 13:44:29.457190 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:29 crc kubenswrapper[5133]: I1121 13:44:29.457206 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:29 crc kubenswrapper[5133]: E1121 13:44:29.457329 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:29 crc kubenswrapper[5133]: E1121 13:44:29.457505 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:29 crc kubenswrapper[5133]: E1121 13:44:29.457548 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:30 crc kubenswrapper[5133]: I1121 13:44:30.457421 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:30 crc kubenswrapper[5133]: E1121 13:44:30.458189 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:31 crc kubenswrapper[5133]: I1121 13:44:31.457466 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:31 crc kubenswrapper[5133]: I1121 13:44:31.457503 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:31 crc kubenswrapper[5133]: I1121 13:44:31.457621 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:31 crc kubenswrapper[5133]: E1121 13:44:31.457655 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:31 crc kubenswrapper[5133]: E1121 13:44:31.458289 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:31 crc kubenswrapper[5133]: E1121 13:44:31.458477 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:31 crc kubenswrapper[5133]: I1121 13:44:31.458970 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:44:31 crc kubenswrapper[5133]: E1121 13:44:31.459272 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-tjzm8_openshift-ovn-kubernetes(373d5da7-fae9-4689-9ede-6e2d69a54c02)\"" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" Nov 21 13:44:32 crc kubenswrapper[5133]: I1121 13:44:32.457760 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:32 crc kubenswrapper[5133]: E1121 13:44:32.458668 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:32 crc kubenswrapper[5133]: E1121 13:44:32.952249 5133 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 13:44:33 crc kubenswrapper[5133]: I1121 13:44:33.457762 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:33 crc kubenswrapper[5133]: I1121 13:44:33.457763 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:33 crc kubenswrapper[5133]: E1121 13:44:33.458099 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:33 crc kubenswrapper[5133]: I1121 13:44:33.457780 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:33 crc kubenswrapper[5133]: E1121 13:44:33.458280 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:33 crc kubenswrapper[5133]: E1121 13:44:33.458411 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:34 crc kubenswrapper[5133]: I1121 13:44:34.457382 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:34 crc kubenswrapper[5133]: E1121 13:44:34.457661 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:35 crc kubenswrapper[5133]: I1121 13:44:35.457074 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:35 crc kubenswrapper[5133]: I1121 13:44:35.457234 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:35 crc kubenswrapper[5133]: E1121 13:44:35.457334 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:35 crc kubenswrapper[5133]: I1121 13:44:35.457422 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:35 crc kubenswrapper[5133]: E1121 13:44:35.457539 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:35 crc kubenswrapper[5133]: E1121 13:44:35.457588 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:36 crc kubenswrapper[5133]: I1121 13:44:36.457278 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:36 crc kubenswrapper[5133]: E1121 13:44:36.457470 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:37 crc kubenswrapper[5133]: I1121 13:44:37.456594 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:37 crc kubenswrapper[5133]: I1121 13:44:37.456735 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:37 crc kubenswrapper[5133]: I1121 13:44:37.456805 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:37 crc kubenswrapper[5133]: E1121 13:44:37.456751 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:37 crc kubenswrapper[5133]: E1121 13:44:37.456944 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:37 crc kubenswrapper[5133]: E1121 13:44:37.457074 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:37 crc kubenswrapper[5133]: E1121 13:44:37.954581 5133 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 13:44:38 crc kubenswrapper[5133]: I1121 13:44:38.457458 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:38 crc kubenswrapper[5133]: E1121 13:44:38.457640 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:39 crc kubenswrapper[5133]: I1121 13:44:39.457060 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:39 crc kubenswrapper[5133]: I1121 13:44:39.457128 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:39 crc kubenswrapper[5133]: I1121 13:44:39.457077 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:39 crc kubenswrapper[5133]: E1121 13:44:39.457233 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:39 crc kubenswrapper[5133]: E1121 13:44:39.457344 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:39 crc kubenswrapper[5133]: E1121 13:44:39.457508 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:40 crc kubenswrapper[5133]: I1121 13:44:40.456732 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:40 crc kubenswrapper[5133]: E1121 13:44:40.456905 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:41 crc kubenswrapper[5133]: I1121 13:44:41.457625 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:41 crc kubenswrapper[5133]: I1121 13:44:41.457756 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:41 crc kubenswrapper[5133]: I1121 13:44:41.457631 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:41 crc kubenswrapper[5133]: E1121 13:44:41.457954 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:41 crc kubenswrapper[5133]: E1121 13:44:41.458222 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:41 crc kubenswrapper[5133]: E1121 13:44:41.458303 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:42 crc kubenswrapper[5133]: I1121 13:44:42.457267 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:42 crc kubenswrapper[5133]: E1121 13:44:42.457458 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:42 crc kubenswrapper[5133]: I1121 13:44:42.460892 5133 scope.go:117] "RemoveContainer" containerID="49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595" Nov 21 13:44:42 crc kubenswrapper[5133]: I1121 13:44:42.461249 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:44:42 crc kubenswrapper[5133]: E1121 13:44:42.955533 5133 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.287242 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/3.log" Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.290054 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerStarted","Data":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.290674 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.291780 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m5d24_0077329a-abad-4c6d-a601-2dc01fd83184/kube-multus/1.log" Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.291823 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m5d24" event={"ID":"0077329a-abad-4c6d-a601-2dc01fd83184","Type":"ContainerStarted","Data":"4e63463f8206d5113d6726032b5b42f65d84c6b36737cf2413454c4d8a340b2c"} Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.321607 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podStartSLOduration=111.321592918 podStartE2EDuration="1m51.321592918s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:44:43.319002798 +0000 UTC m=+143.116835056" watchObservedRunningTime="2025-11-21 13:44:43.321592918 +0000 UTC m=+143.119425166" Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.456781 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.456824 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.456824 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:43 crc kubenswrapper[5133]: E1121 13:44:43.456918 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:43 crc kubenswrapper[5133]: E1121 13:44:43.457101 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:43 crc kubenswrapper[5133]: E1121 13:44:43.457229 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:43 crc kubenswrapper[5133]: I1121 13:44:43.669580 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-x5wnh"] Nov 21 13:44:44 crc kubenswrapper[5133]: I1121 13:44:44.294983 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:44 crc kubenswrapper[5133]: E1121 13:44:44.295836 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:44 crc kubenswrapper[5133]: I1121 13:44:44.456653 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:44 crc kubenswrapper[5133]: E1121 13:44:44.456877 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:45 crc kubenswrapper[5133]: I1121 13:44:45.456548 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:45 crc kubenswrapper[5133]: I1121 13:44:45.456573 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:45 crc kubenswrapper[5133]: I1121 13:44:45.456548 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:45 crc kubenswrapper[5133]: E1121 13:44:45.456681 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:45 crc kubenswrapper[5133]: E1121 13:44:45.456786 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:45 crc kubenswrapper[5133]: E1121 13:44:45.456950 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:46 crc kubenswrapper[5133]: I1121 13:44:46.456903 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:46 crc kubenswrapper[5133]: E1121 13:44:46.457123 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 21 13:44:47 crc kubenswrapper[5133]: I1121 13:44:47.456873 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:47 crc kubenswrapper[5133]: I1121 13:44:47.457117 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:47 crc kubenswrapper[5133]: I1121 13:44:47.457305 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:47 crc kubenswrapper[5133]: E1121 13:44:47.457327 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 21 13:44:47 crc kubenswrapper[5133]: E1121 13:44:47.457377 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 21 13:44:47 crc kubenswrapper[5133]: E1121 13:44:47.457443 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-x5wnh" podUID="b3aabda0-97d9-4886-8909-1c423c4d3238" Nov 21 13:44:48 crc kubenswrapper[5133]: I1121 13:44:48.456665 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:48 crc kubenswrapper[5133]: I1121 13:44:48.458655 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 21 13:44:48 crc kubenswrapper[5133]: I1121 13:44:48.458689 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 21 13:44:49 crc kubenswrapper[5133]: I1121 13:44:49.457394 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:49 crc kubenswrapper[5133]: I1121 13:44:49.457449 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:49 crc kubenswrapper[5133]: I1121 13:44:49.457697 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:44:49 crc kubenswrapper[5133]: I1121 13:44:49.460709 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 21 13:44:49 crc kubenswrapper[5133]: I1121 13:44:49.460899 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 21 13:44:49 crc kubenswrapper[5133]: I1121 13:44:49.462427 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 21 13:44:49 crc kubenswrapper[5133]: I1121 13:44:49.462874 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 21 13:44:53 crc kubenswrapper[5133]: I1121 13:44:53.310553 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:44:53 crc kubenswrapper[5133]: I1121 13:44:53.310956 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:44:53 crc kubenswrapper[5133]: I1121 13:44:53.673768 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.073859 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:44:56 crc kubenswrapper[5133]: E1121 13:44:56.074260 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:46:58.074220502 +0000 UTC m=+277.872052750 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.175053 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.175140 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.175176 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.175217 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.180406 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.185583 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.185994 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.186744 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.270224 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.379388 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:56 crc kubenswrapper[5133]: I1121 13:44:56.396615 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 21 13:44:56 crc kubenswrapper[5133]: W1121 13:44:56.599024 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-704f9c2e9f3cfb4733325635b26a0c3ca7d2176044fa586ec8b32a6255861b59 WatchSource:0}: Error finding container 704f9c2e9f3cfb4733325635b26a0c3ca7d2176044fa586ec8b32a6255861b59: Status 404 returned error can't find the container with id 704f9c2e9f3cfb4733325635b26a0c3ca7d2176044fa586ec8b32a6255861b59 Nov 21 13:44:56 crc kubenswrapper[5133]: W1121 13:44:56.614178 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-c1ffde9a6f2d2ba3b367a6b2996d7173417f9b9f3f5868a113d8a9bbbc94aef1 WatchSource:0}: Error finding container c1ffde9a6f2d2ba3b367a6b2996d7173417f9b9f3f5868a113d8a9bbbc94aef1: Status 404 returned error can't find the container with id c1ffde9a6f2d2ba3b367a6b2996d7173417f9b9f3f5868a113d8a9bbbc94aef1 Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.343423 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"e2cac542e823112e8e1922db916e1bb0355f3e20b81722b0827a27e3871a762d"} Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.346199 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"704f9c2e9f3cfb4733325635b26a0c3ca7d2176044fa586ec8b32a6255861b59"} Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.348172 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"c1ffde9a6f2d2ba3b367a6b2996d7173417f9b9f3f5868a113d8a9bbbc94aef1"} Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.564913 5133 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.625684 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.626267 5133 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.629176 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.629748 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.630290 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.637081 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.637113 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.638092 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.638829 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.644306 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.644332 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.666883 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.667670 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.668532 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dn6hh"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.669238 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.670722 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.671317 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.677263 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ggrbw"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.678022 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.678688 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.678941 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5pm4x"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.679627 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.681198 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.681457 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.681665 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.681814 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.682083 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.682240 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.682440 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.682656 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.682896 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z98jb"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.683573 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.683874 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.682907 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.684726 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.684956 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.685153 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.685623 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.685661 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.682965 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.683058 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.683180 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.683229 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.683273 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.686323 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.686343 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.686615 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.686754 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.687795 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.688079 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.688267 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.688462 5133 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.688528 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.688594 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-9jc2h"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.688919 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.689139 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-w26jv"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.689568 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.689653 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.689711 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.689570 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-m54n8"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.690075 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.690819 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694570 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj6bv\" (UniqueName: \"kubernetes.io/projected/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-kube-api-access-nj6bv\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694622 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/241eb3eb-c31a-4fe5-8547-34a326d75803-serving-cert\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694653 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-client-ca\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694676 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-config\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694699 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ef69baf-6993-472e-bb5c-405965d65d22-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-wdh4q\" (UID: \"7ef69baf-6993-472e-bb5c-405965d65d22\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694722 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694791 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vf6l\" (UniqueName: \"kubernetes.io/projected/7ef69baf-6993-472e-bb5c-405965d65d22-kube-api-access-7vf6l\") pod \"openshift-controller-manager-operator-756b6f6bc6-wdh4q\" (UID: \"7ef69baf-6993-472e-bb5c-405965d65d22\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694810 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694833 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694856 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7jl9\" (UniqueName: \"kubernetes.io/projected/241eb3eb-c31a-4fe5-8547-34a326d75803-kube-api-access-p7jl9\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.694881 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ef69baf-6993-472e-bb5c-405965d65d22-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-wdh4q\" (UID: \"7ef69baf-6993-472e-bb5c-405965d65d22\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.696634 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.697090 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.697108 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.697302 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.697304 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.697379 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.697489 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.697768 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.697830 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.699064 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.704599 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.704647 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.708757 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.745173 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.745228 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.745431 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.746117 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.747240 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.747713 5133 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.748047 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-7jhbl"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.749070 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7jhbl" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.757544 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.758330 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.758649 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.759145 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.761896 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.782794 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.783184 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.783993 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.784083 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.784280 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.785115 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.785627 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.786054 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.786455 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.786503 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.786766 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.787613 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.788790 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.792823 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.793047 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.793069 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.793299 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.795529 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.795644 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpxjs\" (UniqueName: \"kubernetes.io/projected/e9fdda75-f32d-445e-9658-d135d0a548ae-kube-api-access-kpxjs\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.795725 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.795803 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ef69baf-6993-472e-bb5c-405965d65d22-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-wdh4q\" (UID: \"7ef69baf-6993-472e-bb5c-405965d65d22\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.795902 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6069f0ea-bc74-4858-a564-156202fbe36d-metrics-tls\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.799613 
5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6069f0ea-bc74-4858-a564-156202fbe36d-bound-sa-token\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.799694 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6lsx\" (UniqueName: \"kubernetes.io/projected/6069f0ea-bc74-4858-a564-156202fbe36d-kube-api-access-d6lsx\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.799759 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25198fe7-40d9-4add-871c-8f9adadddb75-trusted-ca\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.799835 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/576691ca-af69-46de-b8c2-5b2195e3db0b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.799907 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-oauth-config\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.799984 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/04f53da9-c9c4-4af6-a8e1-37e91549a81a-encryption-config\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800131 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-service-ca\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800220 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e9fdda75-f32d-445e-9658-d135d0a548ae-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800311 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" 
(UniqueName: \"kubernetes.io/secret/e9fdda75-f32d-445e-9658-d135d0a548ae-encryption-config\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800422 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-serving-cert\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800504 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-image-import-ca\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800597 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800689 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-config\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800798 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800892 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vf6l\" (UniqueName: \"kubernetes.io/projected/7ef69baf-6993-472e-bb5c-405965d65d22-kube-api-access-7vf6l\") pod \"openshift-controller-manager-operator-756b6f6bc6-wdh4q\" (UID: \"7ef69baf-6993-472e-bb5c-405965d65d22\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.800986 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shtvf\" (UniqueName: \"kubernetes.io/projected/576691ca-af69-46de-b8c2-5b2195e3db0b-kube-api-access-shtvf\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.801163 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/04f53da9-c9c4-4af6-a8e1-37e91549a81a-etcd-client\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.801303 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/04f53da9-c9c4-4af6-a8e1-37e91549a81a-serving-cert\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.801392 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.801469 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e9fdda75-f32d-445e-9658-d135d0a548ae-audit-dir\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.801535 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-config\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.801599 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/04f53da9-c9c4-4af6-a8e1-37e91549a81a-node-pullsecrets\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.801669 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-config\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.801744 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ef69baf-6993-472e-bb5c-405965d65d22-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-wdh4q\" (UID: \"7ef69baf-6993-472e-bb5c-405965d65d22\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.801908 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e9fdda75-f32d-445e-9658-d135d0a548ae-etcd-client\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") 
" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.802046 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jqz6\" (UniqueName: \"kubernetes.io/projected/f7d3e23a-de5e-4562-9142-16955fe746ad-kube-api-access-9jqz6\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.802149 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-etcd-serving-ca\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.802257 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25198fe7-40d9-4add-871c-8f9adadddb75-serving-cert\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.802352 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2755732-9869-46c9-be0e-e1fc77aa6644-serving-cert\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.802458 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/e9fdda75-f32d-445e-9658-d135d0a548ae-audit-policies\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.802556 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f7d3e23a-de5e-4562-9142-16955fe746ad-machine-approver-tls\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.802698 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/f3cae218-7d6a-42c0-a269-ad8d27dfed75-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9rc8w\" (UID: \"f3cae218-7d6a-42c0-a269-ad8d27dfed75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.795920 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-ntz66"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.802373 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 
13:44:57.802866 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-client-ca\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.804106 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.797076 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ef69baf-6993-472e-bb5c-405965d65d22-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-wdh4q\" (UID: \"7ef69baf-6993-472e-bb5c-405965d65d22\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.804681 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.805374 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e9fdda75-f32d-445e-9658-d135d0a548ae-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.805446 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktl5t\" (UniqueName: \"kubernetes.io/projected/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-kube-api-access-ktl5t\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.805403 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.805686 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nj6bv\" (UniqueName: \"kubernetes.io/projected/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-kube-api-access-nj6bv\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.805787 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkl5p\" (UniqueName: \"kubernetes.io/projected/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-kube-api-access-jkl5p\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.805812 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcbq8\" (UniqueName: \"kubernetes.io/projected/12eb63da-c25d-4d6a-84e5-070b0c9bd1aa-kube-api-access-kcbq8\") pod \"cluster-samples-operator-665b6dd947-jpq5h\" (UID: \"12eb63da-c25d-4d6a-84e5-070b0c9bd1aa\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.805845 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806016 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/576691ca-af69-46de-b8c2-5b2195e3db0b-images\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806054 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrqqk\" (UniqueName: \"kubernetes.io/projected/04f53da9-c9c4-4af6-a8e1-37e91549a81a-kube-api-access-jrqqk\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806208 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806244 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9fdda75-f32d-445e-9658-d135d0a548ae-serving-cert\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806270 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-config\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 
13:44:57.806286 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-dir\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806323 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806349 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/22c9ec1f-50e2-455a-81d3-70a1df82c3ce-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qkmck\" (UID: \"22c9ec1f-50e2-455a-81d3-70a1df82c3ce\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806392 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806447 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806456 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806484 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-client-ca\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806522 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-service-ca-bundle\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806548 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/04f53da9-c9c4-4af6-a8e1-37e91549a81a-audit-dir\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806623 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25198fe7-40d9-4add-871c-8f9adadddb75-config\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806656 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-config\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806688 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f7d3e23a-de5e-4562-9142-16955fe746ad-auth-proxy-config\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806715 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806745 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-audit\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806773 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/12eb63da-c25d-4d6a-84e5-070b0c9bd1aa-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-jpq5h\" (UID: \"12eb63da-c25d-4d6a-84e5-070b0c9bd1aa\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806802 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-oauth-serving-cert\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806827 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-policies\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.806858 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.807212 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.807334 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-864j7\" (UniqueName: \"kubernetes.io/projected/25198fe7-40d9-4add-871c-8f9adadddb75-kube-api-access-864j7\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.807494 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.807629 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4j669\" (UniqueName: \"kubernetes.io/projected/dfcd7a67-a498-4c5b-82e9-c998a388b652-kube-api-access-4j669\") pod \"service-ca-operator-777779d784-4bw4h\" (UID: \"dfcd7a67-a498-4c5b-82e9-c998a388b652\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.807732 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.807858 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47ptm\" (UniqueName: \"kubernetes.io/projected/e2755732-9869-46c9-be0e-e1fc77aa6644-kube-api-access-47ptm\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.807975 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808321 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-client-ca\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808442 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808447 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkc7z\" (UniqueName: \"kubernetes.io/projected/22c9ec1f-50e2-455a-81d3-70a1df82c3ce-kube-api-access-vkc7z\") pod \"openshift-apiserver-operator-796bbdcf4f-qkmck\" (UID: \"22c9ec1f-50e2-455a-81d3-70a1df82c3ce\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808586 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808633 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6069f0ea-bc74-4858-a564-156202fbe36d-trusted-ca\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808674 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-trusted-ca-bundle\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808708 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7d3e23a-de5e-4562-9142-16955fe746ad-config\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808726 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808771 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808786 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808806 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7jl9\" (UniqueName: \"kubernetes.io/projected/241eb3eb-c31a-4fe5-8547-34a326d75803-kube-api-access-p7jl9\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808857 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/576691ca-af69-46de-b8c2-5b2195e3db0b-config\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808964 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.808992 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809037 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c9ec1f-50e2-455a-81d3-70a1df82c3ce-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qkmck\" (UID: \"22c9ec1f-50e2-455a-81d3-70a1df82c3ce\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809063 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bdlf\" (UniqueName: \"kubernetes.io/projected/bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8-kube-api-access-8bdlf\") pod \"downloads-7954f5f757-7jhbl\" (UID: \"bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8\") " pod="openshift-console/downloads-7954f5f757-7jhbl" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809082 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfcd7a67-a498-4c5b-82e9-c998a388b652-config\") pod \"service-ca-operator-777779d784-4bw4h\" (UID: \"dfcd7a67-a498-4c5b-82e9-c998a388b652\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809103 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-serving-cert\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: 
\"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809122 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3cae218-7d6a-42c0-a269-ad8d27dfed75-serving-cert\") pod \"openshift-config-operator-7777fb866f-9rc8w\" (UID: \"f3cae218-7d6a-42c0-a269-ad8d27dfed75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809141 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dfcd7a67-a498-4c5b-82e9-c998a388b652-serving-cert\") pod \"service-ca-operator-777779d784-4bw4h\" (UID: \"dfcd7a67-a498-4c5b-82e9-c998a388b652\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809157 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m74kk\" (UniqueName: \"kubernetes.io/projected/9b4d3b1f-5c64-4f18-91ca-d70893516609-kube-api-access-m74kk\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809218 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfvp5\" (UniqueName: \"kubernetes.io/projected/f3cae218-7d6a-42c0-a269-ad8d27dfed75-kube-api-access-dfvp5\") pod \"openshift-config-operator-7777fb866f-9rc8w\" (UID: \"f3cae218-7d6a-42c0-a269-ad8d27dfed75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809246 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/241eb3eb-c31a-4fe5-8547-34a326d75803-serving-cert\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.809265 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.810067 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.811334 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.811629 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.811833 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.812045 5133 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.812163 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.812240 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.812449 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.812493 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.812642 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.812801 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.812910 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813025 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813082 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.812650 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813313 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813359 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813483 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813495 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813588 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813628 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813663 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813690 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813730 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-config\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813797 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813811 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813969 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813982 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.813993 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.816077 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.816091 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.816430 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/241eb3eb-c31a-4fe5-8547-34a326d75803-serving-cert\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.817120 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7ef69baf-6993-472e-bb5c-405965d65d22-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-wdh4q\" (UID: \"7ef69baf-6993-472e-bb5c-405965d65d22\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.819241 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.820255 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.820269 5133 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console-operator"/"trusted-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.820495 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.821102 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.822927 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.826039 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.827305 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zd8tq"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.843812 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.846789 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.848037 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.848822 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6fn5b"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.849473 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.850093 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.850929 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.853130 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.854133 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.859887 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.863549 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.865778 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5rjt4"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.867045 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.868145 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-x7zf8"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.868444 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.868795 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.870599 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.871398 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.872180 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.872774 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.873761 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-mp878"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.874882 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.876461 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.877139 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.878272 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.879109 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.880097 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-ddmrq"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.881273 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.881404 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.881681 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.882727 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.884114 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.884806 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.885073 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-9mtt2"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.885785 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-9mtt2" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.886312 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zfc95"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.887987 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.888422 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5pm4x"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.888529 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.889400 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dn6hh"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.890497 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-ntz66"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.892153 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.893213 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.894205 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.895760 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.896575 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.897818 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.899031 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ggrbw"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.900568 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.901533 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-m54n8"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.902128 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z98jb"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.903103 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6fn5b"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.904139 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-w26jv"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.905191 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zd8tq"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.907307 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.908128 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.908442 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-9jc2h"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 
13:44:57.909412 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-mp878"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910201 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9fdda75-f32d-445e-9658-d135d0a548ae-serving-cert\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910241 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-config\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910267 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-dir\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910290 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910312 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/22c9ec1f-50e2-455a-81d3-70a1df82c3ce-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qkmck\" (UID: \"22c9ec1f-50e2-455a-81d3-70a1df82c3ce\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910333 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910354 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-service-ca-bundle\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910373 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/04f53da9-c9c4-4af6-a8e1-37e91549a81a-audit-dir\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910390 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25198fe7-40d9-4add-871c-8f9adadddb75-config\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910418 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8bbf68aa-448e-453c-8df2-839594103920-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zd8tq\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910434 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-dir\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911173 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.910444 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7a05b22-40d7-44b3-ab76-5b1548e042ba-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911287 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7a05b22-40d7-44b3-ab76-5b1548e042ba-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911329 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq2xb\" (UniqueName: \"kubernetes.io/projected/589fc21a-acb4-4311-9e79-f9ad27c6f187-kube-api-access-fq2xb\") pod \"ingress-canary-9mtt2\" (UID: \"589fc21a-acb4-4311-9e79-f9ad27c6f187\") " pod="openshift-ingress-canary/ingress-canary-9mtt2" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911371 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f7d3e23a-de5e-4562-9142-16955fe746ad-auth-proxy-config\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911404 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-audit\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc 
kubenswrapper[5133]: I1121 13:44:57.911440 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911468 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911495 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-864j7\" (UniqueName: \"kubernetes.io/projected/25198fe7-40d9-4add-871c-8f9adadddb75-kube-api-access-864j7\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911518 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/12eb63da-c25d-4d6a-84e5-070b0c9bd1aa-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-jpq5h\" (UID: \"12eb63da-c25d-4d6a-84e5-070b0c9bd1aa\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911542 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-oauth-serving-cert\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911567 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-policies\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911593 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b647f6d6-6425-43f9-9e70-3a66dee6f37c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911619 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4j669\" (UniqueName: \"kubernetes.io/projected/dfcd7a67-a498-4c5b-82e9-c998a388b652-kube-api-access-4j669\") pod \"service-ca-operator-777779d784-4bw4h\" (UID: \"dfcd7a67-a498-4c5b-82e9-c998a388b652\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911645 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911673 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47ptm\" (UniqueName: \"kubernetes.io/projected/e2755732-9869-46c9-be0e-e1fc77aa6644-kube-api-access-47ptm\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911703 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c99dec9-4fc0-4576-81e4-703a690f4f45-proxy-tls\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911733 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/589fc21a-acb4-4311-9e79-f9ad27c6f187-cert\") pod \"ingress-canary-9mtt2\" (UID: \"589fc21a-acb4-4311-9e79-f9ad27c6f187\") " pod="openshift-ingress-canary/ingress-canary-9mtt2" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911761 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911795 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkc7z\" (UniqueName: \"kubernetes.io/projected/22c9ec1f-50e2-455a-81d3-70a1df82c3ce-kube-api-access-vkc7z\") pod \"openshift-apiserver-operator-796bbdcf4f-qkmck\" (UID: \"22c9ec1f-50e2-455a-81d3-70a1df82c3ce\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911810 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-config\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911824 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6069f0ea-bc74-4858-a564-156202fbe36d-trusted-ca\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911856 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-trusted-ca-bundle\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911885 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7d3e23a-de5e-4562-9142-16955fe746ad-config\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911909 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911932 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911955 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9c99dec9-4fc0-4576-81e4-703a690f4f45-auth-proxy-config\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.911979 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f49021c5-9619-47e6-a758-d800793dae08-etcd-service-ca\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912031 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912058 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llzdq\" (UniqueName: \"kubernetes.io/projected/b647f6d6-6425-43f9-9e70-3a66dee6f37c-kube-api-access-llzdq\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912079 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh"] Nov 21 13:44:57 crc kubenswrapper[5133]: 
I1121 13:44:57.912091 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/576691ca-af69-46de-b8c2-5b2195e3db0b-config\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912112 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c9ec1f-50e2-455a-81d3-70a1df82c3ce-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qkmck\" (UID: \"22c9ec1f-50e2-455a-81d3-70a1df82c3ce\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912137 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bdlf\" (UniqueName: \"kubernetes.io/projected/bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8-kube-api-access-8bdlf\") pod \"downloads-7954f5f757-7jhbl\" (UID: \"bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8\") " pod="openshift-console/downloads-7954f5f757-7jhbl" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912160 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfcd7a67-a498-4c5b-82e9-c998a388b652-config\") pod \"service-ca-operator-777779d784-4bw4h\" (UID: \"dfcd7a67-a498-4c5b-82e9-c998a388b652\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912185 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912204 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912217 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m74kk\" (UniqueName: \"kubernetes.io/projected/9b4d3b1f-5c64-4f18-91ca-d70893516609-kube-api-access-m74kk\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912246 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-serving-cert\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912256 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/25198fe7-40d9-4add-871c-8f9adadddb75-config\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912274 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3cae218-7d6a-42c0-a269-ad8d27dfed75-serving-cert\") pod \"openshift-config-operator-7777fb866f-9rc8w\" (UID: \"f3cae218-7d6a-42c0-a269-ad8d27dfed75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912280 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/04f53da9-c9c4-4af6-a8e1-37e91549a81a-audit-dir\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912311 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dfcd7a67-a498-4c5b-82e9-c998a388b652-serving-cert\") pod \"service-ca-operator-777779d784-4bw4h\" (UID: \"dfcd7a67-a498-4c5b-82e9-c998a388b652\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912360 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfvp5\" (UniqueName: \"kubernetes.io/projected/f3cae218-7d6a-42c0-a269-ad8d27dfed75-kube-api-access-dfvp5\") pod \"openshift-config-operator-7777fb866f-9rc8w\" (UID: \"f3cae218-7d6a-42c0-a269-ad8d27dfed75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912391 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a06ef925-e180-4dd7-b57f-3d3017be6bd0-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-bhlrh\" (UID: \"a06ef925-e180-4dd7-b57f-3d3017be6bd0\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912466 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnxp6\" (UniqueName: \"kubernetes.io/projected/a06ef925-e180-4dd7-b57f-3d3017be6bd0-kube-api-access-rnxp6\") pod \"package-server-manager-789f6589d5-bhlrh\" (UID: \"a06ef925-e180-4dd7-b57f-3d3017be6bd0\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912504 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912532 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f49021c5-9619-47e6-a758-d800793dae08-config\") pod 
\"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912559 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912647 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5686b316-695d-4eab-a619-1033b90d6d96-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-sspkx\" (UID: \"5686b316-695d-4eab-a619-1033b90d6d96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912704 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dbqh\" (UniqueName: \"kubernetes.io/projected/5686b316-695d-4eab-a619-1033b90d6d96-kube-api-access-2dbqh\") pod \"control-plane-machine-set-operator-78cbb6b69f-sspkx\" (UID: \"5686b316-695d-4eab-a619-1033b90d6d96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912734 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpxjs\" (UniqueName: \"kubernetes.io/projected/e9fdda75-f32d-445e-9658-d135d0a548ae-kube-api-access-kpxjs\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912760 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912785 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b647f6d6-6425-43f9-9e70-3a66dee6f37c-proxy-tls\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912813 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6lsx\" (UniqueName: \"kubernetes.io/projected/6069f0ea-bc74-4858-a564-156202fbe36d-kube-api-access-d6lsx\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912837 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25198fe7-40d9-4add-871c-8f9adadddb75-trusted-ca\") pod 
\"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912860 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7a05b22-40d7-44b3-ab76-5b1548e042ba-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912884 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gmp9\" (UniqueName: \"kubernetes.io/projected/f49021c5-9619-47e6-a758-d800793dae08-kube-api-access-5gmp9\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912914 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6069f0ea-bc74-4858-a564-156202fbe36d-metrics-tls\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912938 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6069f0ea-bc74-4858-a564-156202fbe36d-bound-sa-token\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912963 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/04f53da9-c9c4-4af6-a8e1-37e91549a81a-encryption-config\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.912991 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/576691ca-af69-46de-b8c2-5b2195e3db0b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.913609 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-service-ca-bundle\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.913834 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.914081 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e9fdda75-f32d-445e-9658-d135d0a548ae-serving-cert\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.914303 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-policies\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.914898 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-oauth-config\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.914954 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m65sl\" (UniqueName: \"kubernetes.io/projected/9c99dec9-4fc0-4576-81e4-703a690f4f45-kube-api-access-m65sl\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915104 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-service-ca\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915154 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e9fdda75-f32d-445e-9658-d135d0a548ae-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915182 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsp84\" (UniqueName: \"kubernetes.io/projected/8bbf68aa-448e-453c-8df2-839594103920-kube-api-access-gsp84\") pod \"marketplace-operator-79b997595-zd8tq\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915210 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9c99dec9-4fc0-4576-81e4-703a690f4f45-images\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915283 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915312 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8bbf68aa-448e-453c-8df2-839594103920-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zd8tq\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915334 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f49021c5-9619-47e6-a758-d800793dae08-etcd-client\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915360 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e9fdda75-f32d-445e-9658-d135d0a548ae-encryption-config\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915383 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-serving-cert\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915411 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-image-import-ca\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.915643 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.916185 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.916273 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/22c9ec1f-50e2-455a-81d3-70a1df82c3ce-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-qkmck\" (UID: \"22c9ec1f-50e2-455a-81d3-70a1df82c3ce\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.916535 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.916938 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shtvf\" (UniqueName: \"kubernetes.io/projected/576691ca-af69-46de-b8c2-5b2195e3db0b-kube-api-access-shtvf\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.916988 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-config\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.928204 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-serving-cert\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.928227 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/04f53da9-c9c4-4af6-a8e1-37e91549a81a-etcd-client\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.917121 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7d3e23a-de5e-4562-9142-16955fe746ad-config\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.917161 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6069f0ea-bc74-4858-a564-156202fbe36d-trusted-ca\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.928279 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/04f53da9-c9c4-4af6-a8e1-37e91549a81a-serving-cert\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.918644 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-trusted-ca-bundle\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " 
pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.919713 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.919783 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.919661 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/576691ca-af69-46de-b8c2-5b2195e3db0b-config\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.919890 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.920514 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c9ec1f-50e2-455a-81d3-70a1df82c3ce-config\") pod \"openshift-apiserver-operator-796bbdcf4f-qkmck\" (UID: \"22c9ec1f-50e2-455a-81d3-70a1df82c3ce\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.920693 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.921105 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.928847 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25198fe7-40d9-4add-871c-8f9adadddb75-trusted-ca\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.921135 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.921857 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e9fdda75-f32d-445e-9658-d135d0a548ae-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.921876 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfcd7a67-a498-4c5b-82e9-c998a388b652-config\") pod \"service-ca-operator-777779d784-4bw4h\" (UID: \"dfcd7a67-a498-4c5b-82e9-c998a388b652\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.922074 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-service-ca\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.922205 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.923463 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dfcd7a67-a498-4c5b-82e9-c998a388b652-serving-cert\") pod \"service-ca-operator-777779d784-4bw4h\" (UID: \"dfcd7a67-a498-4c5b-82e9-c998a388b652\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.923978 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-oauth-serving-cert\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.924130 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f3cae218-7d6a-42c0-a269-ad8d27dfed75-serving-cert\") pod \"openshift-config-operator-7777fb866f-9rc8w\" (UID: \"f3cae218-7d6a-42c0-a269-ad8d27dfed75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.924684 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-image-import-ca\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.925045 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-serving-cert\") pod \"console-f9d7485db-m54n8\" (UID: 
\"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.925945 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.927433 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/e9fdda75-f32d-445e-9658-d135d0a548ae-encryption-config\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.927721 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/576691ca-af69-46de-b8c2-5b2195e3db0b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.928144 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/04f53da9-c9c4-4af6-a8e1-37e91549a81a-encryption-config\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.917436 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-audit\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.917392 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f7d3e23a-de5e-4562-9142-16955fe746ad-auth-proxy-config\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.928324 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f49021c5-9619-47e6-a758-d800793dae08-etcd-ca\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.927806 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.930064 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/e9fdda75-f32d-445e-9658-d135d0a548ae-audit-dir\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.921122 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/12eb63da-c25d-4d6a-84e5-070b0c9bd1aa-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-jpq5h\" (UID: \"12eb63da-c25d-4d6a-84e5-070b0c9bd1aa\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.929935 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/e9fdda75-f32d-445e-9658-d135d0a548ae-audit-dir\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.930386 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-config\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.930533 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/04f53da9-c9c4-4af6-a8e1-37e91549a81a-node-pullsecrets\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.930654 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e9fdda75-f32d-445e-9658-d135d0a548ae-etcd-client\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.930768 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jqz6\" (UniqueName: \"kubernetes.io/projected/f7d3e23a-de5e-4562-9142-16955fe746ad-kube-api-access-9jqz6\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.930932 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-etcd-serving-ca\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931318 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931334 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-config\") pod 
\"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931391 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25198fe7-40d9-4add-871c-8f9adadddb75-serving-cert\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931433 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f7d3e23a-de5e-4562-9142-16955fe746ad-machine-approver-tls\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931129 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/04f53da9-c9c4-4af6-a8e1-37e91549a81a-node-pullsecrets\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931516 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/f3cae218-7d6a-42c0-a269-ad8d27dfed75-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9rc8w\" (UID: \"f3cae218-7d6a-42c0-a269-ad8d27dfed75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931562 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-client-ca\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931300 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/04f53da9-c9c4-4af6-a8e1-37e91549a81a-etcd-client\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.930951 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6069f0ea-bc74-4858-a564-156202fbe36d-metrics-tls\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931605 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2755732-9869-46c9-be0e-e1fc77aa6644-serving-cert\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931659 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/e9fdda75-f32d-445e-9658-d135d0a548ae-audit-policies\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931701 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e9fdda75-f32d-445e-9658-d135d0a548ae-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931778 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktl5t\" (UniqueName: \"kubernetes.io/projected/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-kube-api-access-ktl5t\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931785 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-config\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931852 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkl5p\" (UniqueName: \"kubernetes.io/projected/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-kube-api-access-jkl5p\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931930 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/576691ca-af69-46de-b8c2-5b2195e3db0b-images\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931963 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrqqk\" (UniqueName: \"kubernetes.io/projected/04f53da9-c9c4-4af6-a8e1-37e91549a81a-kube-api-access-jrqqk\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.931992 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcbq8\" (UniqueName: \"kubernetes.io/projected/12eb63da-c25d-4d6a-84e5-070b0c9bd1aa-kube-api-access-kcbq8\") pod \"cluster-samples-operator-665b6dd947-jpq5h\" (UID: \"12eb63da-c25d-4d6a-84e5-070b0c9bd1aa\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.932048 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f49021c5-9619-47e6-a758-d800793dae08-serving-cert\") pod \"etcd-operator-b45778765-6fn5b\" (UID: 
\"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.932493 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9zjjz"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.933537 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-config\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.933560 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/04f53da9-c9c4-4af6-a8e1-37e91549a81a-serving-cert\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.933574 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/f3cae218-7d6a-42c0-a269-ad8d27dfed75-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9rc8w\" (UID: \"f3cae218-7d6a-42c0-a269-ad8d27dfed75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.933890 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-config\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.934582 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-client-ca\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.934742 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/e9fdda75-f32d-445e-9658-d135d0a548ae-audit-policies\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.934865 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.935242 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/e9fdda75-f32d-445e-9658-d135d0a548ae-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.919883 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.937504 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f7d3e23a-de5e-4562-9142-16955fe746ad-machine-approver-tls\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.939553 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/e9fdda75-f32d-445e-9658-d135d0a548ae-etcd-client\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.939659 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25198fe7-40d9-4add-871c-8f9adadddb75-serving-cert\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.942936 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5rjt4"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.943805 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2755732-9869-46c9-be0e-e1fc77aa6644-serving-cert\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.944610 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.945037 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-9qx4w"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.946249 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 
13:44:57.946367 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-9qx4w" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.947153 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/576691ca-af69-46de-b8c2-5b2195e3db0b-images\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.948548 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/04f53da9-c9c4-4af6-a8e1-37e91549a81a-etcd-serving-ca\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.949018 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7jhbl"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.949233 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.950158 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.950505 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-oauth-config\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.951288 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.952439 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.953522 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.954624 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.956768 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9zjjz"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.958347 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-9mtt2"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.959456 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-ddmrq"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.960530 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zfc95"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.962067 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9qx4w"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 
13:44:57.963498 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-m4jcp"] Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.964512 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.968016 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 21 13:44:57 crc kubenswrapper[5133]: I1121 13:44:57.988865 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.009604 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.028484 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.032754 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a06ef925-e180-4dd7-b57f-3d3017be6bd0-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-bhlrh\" (UID: \"a06ef925-e180-4dd7-b57f-3d3017be6bd0\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.032786 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnxp6\" (UniqueName: \"kubernetes.io/projected/a06ef925-e180-4dd7-b57f-3d3017be6bd0-kube-api-access-rnxp6\") pod \"package-server-manager-789f6589d5-bhlrh\" (UID: \"a06ef925-e180-4dd7-b57f-3d3017be6bd0\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.032812 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f49021c5-9619-47e6-a758-d800793dae08-config\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.032861 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5686b316-695d-4eab-a619-1033b90d6d96-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-sspkx\" (UID: \"5686b316-695d-4eab-a619-1033b90d6d96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.032881 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dbqh\" (UniqueName: \"kubernetes.io/projected/5686b316-695d-4eab-a619-1033b90d6d96-kube-api-access-2dbqh\") pod \"control-plane-machine-set-operator-78cbb6b69f-sspkx\" (UID: \"5686b316-695d-4eab-a619-1033b90d6d96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.032906 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" 
(UniqueName: \"kubernetes.io/secret/b647f6d6-6425-43f9-9e70-3a66dee6f37c-proxy-tls\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.032935 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7a05b22-40d7-44b3-ab76-5b1548e042ba-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.032960 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gmp9\" (UniqueName: \"kubernetes.io/projected/f49021c5-9619-47e6-a758-d800793dae08-kube-api-access-5gmp9\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.032980 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m65sl\" (UniqueName: \"kubernetes.io/projected/9c99dec9-4fc0-4576-81e4-703a690f4f45-kube-api-access-m65sl\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033023 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsp84\" (UniqueName: \"kubernetes.io/projected/8bbf68aa-448e-453c-8df2-839594103920-kube-api-access-gsp84\") pod \"marketplace-operator-79b997595-zd8tq\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033065 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9c99dec9-4fc0-4576-81e4-703a690f4f45-images\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033084 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8bbf68aa-448e-453c-8df2-839594103920-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zd8tq\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033104 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f49021c5-9619-47e6-a758-d800793dae08-etcd-client\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033155 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f49021c5-9619-47e6-a758-d800793dae08-etcd-ca\") pod 
\"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033249 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f49021c5-9619-47e6-a758-d800793dae08-serving-cert\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033277 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8bbf68aa-448e-453c-8df2-839594103920-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zd8tq\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033297 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq2xb\" (UniqueName: \"kubernetes.io/projected/589fc21a-acb4-4311-9e79-f9ad27c6f187-kube-api-access-fq2xb\") pod \"ingress-canary-9mtt2\" (UID: \"589fc21a-acb4-4311-9e79-f9ad27c6f187\") " pod="openshift-ingress-canary/ingress-canary-9mtt2" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033316 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7a05b22-40d7-44b3-ab76-5b1548e042ba-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033337 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7a05b22-40d7-44b3-ab76-5b1548e042ba-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033368 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b647f6d6-6425-43f9-9e70-3a66dee6f37c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033410 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c99dec9-4fc0-4576-81e4-703a690f4f45-proxy-tls\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033449 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/589fc21a-acb4-4311-9e79-f9ad27c6f187-cert\") pod \"ingress-canary-9mtt2\" (UID: \"589fc21a-acb4-4311-9e79-f9ad27c6f187\") " pod="openshift-ingress-canary/ingress-canary-9mtt2" Nov 21 13:44:58 crc kubenswrapper[5133]: 
I1121 13:44:58.033484 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9c99dec9-4fc0-4576-81e4-703a690f4f45-auth-proxy-config\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033501 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f49021c5-9619-47e6-a758-d800793dae08-etcd-service-ca\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.033529 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llzdq\" (UniqueName: \"kubernetes.io/projected/b647f6d6-6425-43f9-9e70-3a66dee6f37c-kube-api-access-llzdq\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.034276 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/9c99dec9-4fc0-4576-81e4-703a690f4f45-auth-proxy-config\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.034460 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b647f6d6-6425-43f9-9e70-3a66dee6f37c-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.082533 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.102984 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vf6l\" (UniqueName: \"kubernetes.io/projected/7ef69baf-6993-472e-bb5c-405965d65d22-kube-api-access-7vf6l\") pod \"openshift-controller-manager-operator-756b6f6bc6-wdh4q\" (UID: \"7ef69baf-6993-472e-bb5c-405965d65d22\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.122717 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nj6bv\" (UniqueName: \"kubernetes.io/projected/85aadf35-e509-4da3-92a1-8dab8ec0a6b5-kube-api-access-nj6bv\") pod \"cluster-image-registry-operator-dc59b4c8b-z8962\" (UID: \"85aadf35-e509-4da3-92a1-8dab8ec0a6b5\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 
13:44:58.128594 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.149197 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.168511 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.178094 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a06ef925-e180-4dd7-b57f-3d3017be6bd0-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-bhlrh\" (UID: \"a06ef925-e180-4dd7-b57f-3d3017be6bd0\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.190223 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.211192 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.229694 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.249121 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.269727 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.289237 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.302562 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.309482 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.349288 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.353264 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7jl9\" (UniqueName: \"kubernetes.io/projected/241eb3eb-c31a-4fe5-8547-34a326d75803-kube-api-access-p7jl9\") pod \"route-controller-manager-6576b87f9c-zg29g\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.355782 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"c8879a10ead1e8e2685bcc83521d3829dce04cbb01592ad59cbe2df2ab75305c"} Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.356792 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"69a6a1bdb8a25f5e4b8524c9c150dafe1c8ab9f3befa041e844e5e0cce8e0d86"} Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.361580 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.370070 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.389426 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.411644 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.421512 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8bbf68aa-448e-453c-8df2-839594103920-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-zd8tq\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.430219 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.449675 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.490956 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.493145 5133 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.497866 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8bbf68aa-448e-453c-8df2-839594103920-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-zd8tq\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.510041 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.528809 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.548097 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.557340 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962"] Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.566284 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q"] Nov 21 13:44:58 crc kubenswrapper[5133]: W1121 13:44:58.567763 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85aadf35_e509_4da3_92a1_8dab8ec0a6b5.slice/crio-d79a12d7a8bea236dfb0aa8c90f4d9eeb39215e718817e0dd75678c208102404 WatchSource:0}: Error finding container d79a12d7a8bea236dfb0aa8c90f4d9eeb39215e718817e0dd75678c208102404: Status 404 returned error can't find the container with id d79a12d7a8bea236dfb0aa8c90f4d9eeb39215e718817e0dd75678c208102404 Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.569233 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.578466 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.589492 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.594845 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f49021c5-9619-47e6-a758-d800793dae08-config\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.609778 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.630207 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.648658 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.659317 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f49021c5-9619-47e6-a758-d800793dae08-serving-cert\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.669825 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.674682 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f49021c5-9619-47e6-a758-d800793dae08-etcd-ca\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.688742 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.694654 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f49021c5-9619-47e6-a758-d800793dae08-etcd-service-ca\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.710845 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.732149 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.740324 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f49021c5-9619-47e6-a758-d800793dae08-etcd-client\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 
13:44:58.749805 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.759687 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/5686b316-695d-4eab-a619-1033b90d6d96-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-sspkx\" (UID: \"5686b316-695d-4eab-a619-1033b90d6d96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.769177 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.788836 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.794815 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/9c99dec9-4fc0-4576-81e4-703a690f4f45-images\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.812642 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.825145 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g"] Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.829431 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 21 13:44:58 crc kubenswrapper[5133]: W1121 13:44:58.834452 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod241eb3eb_c31a_4fe5_8547_34a326d75803.slice/crio-f6e1d489911c2aa1988817c3927a2c6cc36caef5b152a31ebb03038989b30777 WatchSource:0}: Error finding container f6e1d489911c2aa1988817c3927a2c6cc36caef5b152a31ebb03038989b30777: Status 404 returned error can't find the container with id f6e1d489911c2aa1988817c3927a2c6cc36caef5b152a31ebb03038989b30777 Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.838137 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/9c99dec9-4fc0-4576-81e4-703a690f4f45-proxy-tls\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.848766 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.869416 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.887079 5133 request.go:700] Waited for 1.019668465s due to client-side throttling, not priority and fairness, request: 
GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/secrets?fieldSelector=metadata.name%3Dimage-registry-tls&limit=500&resourceVersion=0 Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.889689 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.908707 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.928868 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.950779 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.970077 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 21 13:44:58 crc kubenswrapper[5133]: I1121 13:44:58.988882 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.009525 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.031393 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 21 13:44:59 crc kubenswrapper[5133]: E1121 13:44:59.034160 5133 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 21 13:44:59 crc kubenswrapper[5133]: E1121 13:44:59.034240 5133 secret.go:188] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: failed to sync secret cache: timed out waiting for the condition Nov 21 13:44:59 crc kubenswrapper[5133]: E1121 13:44:59.034270 5133 configmap.go:193] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 21 13:44:59 crc kubenswrapper[5133]: E1121 13:44:59.034298 5133 secret.go:188] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 21 13:44:59 crc kubenswrapper[5133]: E1121 13:44:59.034258 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/589fc21a-acb4-4311-9e79-f9ad27c6f187-cert podName:589fc21a-acb4-4311-9e79-f9ad27c6f187 nodeName:}" failed. No retries permitted until 2025-11-21 13:44:59.53423488 +0000 UTC m=+159.332067128 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/589fc21a-acb4-4311-9e79-f9ad27c6f187-cert") pod "ingress-canary-9mtt2" (UID: "589fc21a-acb4-4311-9e79-f9ad27c6f187") : failed to sync secret cache: timed out waiting for the condition Nov 21 13:44:59 crc kubenswrapper[5133]: E1121 13:44:59.034351 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e7a05b22-40d7-44b3-ab76-5b1548e042ba-config podName:e7a05b22-40d7-44b3-ab76-5b1548e042ba nodeName:}" failed. No retries permitted until 2025-11-21 13:44:59.534334443 +0000 UTC m=+159.332166691 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/e7a05b22-40d7-44b3-ab76-5b1548e042ba-config") pod "openshift-kube-scheduler-operator-5fdd9b5758-mghzk" (UID: "e7a05b22-40d7-44b3-ab76-5b1548e042ba") : failed to sync configmap cache: timed out waiting for the condition Nov 21 13:44:59 crc kubenswrapper[5133]: E1121 13:44:59.034372 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b647f6d6-6425-43f9-9e70-3a66dee6f37c-proxy-tls podName:b647f6d6-6425-43f9-9e70-3a66dee6f37c nodeName:}" failed. No retries permitted until 2025-11-21 13:44:59.534361734 +0000 UTC m=+159.332193982 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/b647f6d6-6425-43f9-9e70-3a66dee6f37c-proxy-tls") pod "machine-config-controller-84d6567774-mp878" (UID: "b647f6d6-6425-43f9-9e70-3a66dee6f37c") : failed to sync secret cache: timed out waiting for the condition Nov 21 13:44:59 crc kubenswrapper[5133]: E1121 13:44:59.034390 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e7a05b22-40d7-44b3-ab76-5b1548e042ba-serving-cert podName:e7a05b22-40d7-44b3-ab76-5b1548e042ba nodeName:}" failed. No retries permitted until 2025-11-21 13:44:59.534382794 +0000 UTC m=+159.332215042 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/e7a05b22-40d7-44b3-ab76-5b1548e042ba-serving-cert") pod "openshift-kube-scheduler-operator-5fdd9b5758-mghzk" (UID: "e7a05b22-40d7-44b3-ab76-5b1548e042ba") : failed to sync secret cache: timed out waiting for the condition Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.049106 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.069183 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.089020 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.109953 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.129043 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.148950 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.169088 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.190484 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.210363 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.235406 5133 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.260798 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.269609 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.289076 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.309546 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.329925 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.348348 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.362336 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" event={"ID":"241eb3eb-c31a-4fe5-8547-34a326d75803","Type":"ContainerStarted","Data":"8ac09f959915ca968af51204932c60a203e4932dc5d459af8cdcd176c1462b7a"} Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.362420 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" event={"ID":"241eb3eb-c31a-4fe5-8547-34a326d75803","Type":"ContainerStarted","Data":"f6e1d489911c2aa1988817c3927a2c6cc36caef5b152a31ebb03038989b30777"} Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.362990 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.364662 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" event={"ID":"7ef69baf-6993-472e-bb5c-405965d65d22","Type":"ContainerStarted","Data":"be21da06bdbbcfff02d3f22b47cf6145620c4433df81291d7c1169ff1a623a01"} Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.364730 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" event={"ID":"7ef69baf-6993-472e-bb5c-405965d65d22","Type":"ContainerStarted","Data":"f5916846e1e31b182e5b60a377ffea8088b46cde5e18b1e17e11e316433a26e7"} Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.364980 5133 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-zg29g container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.365057 5133 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" podUID="241eb3eb-c31a-4fe5-8547-34a326d75803" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.367488 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" event={"ID":"85aadf35-e509-4da3-92a1-8dab8ec0a6b5","Type":"ContainerStarted","Data":"5dfc1f5aa7d17198ccac84fe9c2e2b6bd10496b61f39069d8582201912a1f441"} Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.367533 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" event={"ID":"85aadf35-e509-4da3-92a1-8dab8ec0a6b5","Type":"ContainerStarted","Data":"d79a12d7a8bea236dfb0aa8c90f4d9eeb39215e718817e0dd75678c208102404"} Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.368103 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.369387 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"3d4becb90149f854ab676eba8c2fe1931085de8987aced0fe14d8fe3190df4eb"} Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.369472 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.388571 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.409647 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.429397 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.448928 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.489405 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.509933 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.529340 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.549829 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.563052 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b647f6d6-6425-43f9-9e70-3a66dee6f37c-proxy-tls\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.563219 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7a05b22-40d7-44b3-ab76-5b1548e042ba-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.563260 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7a05b22-40d7-44b3-ab76-5b1548e042ba-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.563325 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/589fc21a-acb4-4311-9e79-f9ad27c6f187-cert\") pod \"ingress-canary-9mtt2\" (UID: \"589fc21a-acb4-4311-9e79-f9ad27c6f187\") " pod="openshift-ingress-canary/ingress-canary-9mtt2" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.564769 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7a05b22-40d7-44b3-ab76-5b1548e042ba-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.568268 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7a05b22-40d7-44b3-ab76-5b1548e042ba-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.569723 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.569894 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b647f6d6-6425-43f9-9e70-3a66dee6f37c-proxy-tls\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.579984 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/589fc21a-acb4-4311-9e79-f9ad27c6f187-cert\") pod \"ingress-canary-9mtt2\" (UID: \"589fc21a-acb4-4311-9e79-f9ad27c6f187\") " pod="openshift-ingress-canary/ingress-canary-9mtt2" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.610453 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4j669\" (UniqueName: \"kubernetes.io/projected/dfcd7a67-a498-4c5b-82e9-c998a388b652-kube-api-access-4j669\") pod \"service-ca-operator-777779d784-4bw4h\" (UID: \"dfcd7a67-a498-4c5b-82e9-c998a388b652\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.625716 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47ptm\" (UniqueName: \"kubernetes.io/projected/e2755732-9869-46c9-be0e-e1fc77aa6644-kube-api-access-47ptm\") pod \"controller-manager-879f6c89f-5pm4x\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.649632 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkc7z\" (UniqueName: \"kubernetes.io/projected/22c9ec1f-50e2-455a-81d3-70a1df82c3ce-kube-api-access-vkc7z\") pod \"openshift-apiserver-operator-796bbdcf4f-qkmck\" (UID: \"22c9ec1f-50e2-455a-81d3-70a1df82c3ce\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.662582 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpxjs\" (UniqueName: \"kubernetes.io/projected/e9fdda75-f32d-445e-9658-d135d0a548ae-kube-api-access-kpxjs\") pod \"apiserver-7bbb656c7d-72h2c\" (UID: \"e9fdda75-f32d-445e-9658-d135d0a548ae\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.670554 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.687107 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bdlf\" (UniqueName: \"kubernetes.io/projected/bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8-kube-api-access-8bdlf\") pod \"downloads-7954f5f757-7jhbl\" (UID: \"bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8\") " pod="openshift-console/downloads-7954f5f757-7jhbl" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.696832 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7jhbl" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.713816 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-864j7\" (UniqueName: \"kubernetes.io/projected/25198fe7-40d9-4add-871c-8f9adadddb75-kube-api-access-864j7\") pod \"console-operator-58897d9998-z98jb\" (UID: \"25198fe7-40d9-4add-871c-8f9adadddb75\") " pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.725376 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m74kk\" (UniqueName: \"kubernetes.io/projected/9b4d3b1f-5c64-4f18-91ca-d70893516609-kube-api-access-m74kk\") pod \"oauth-openshift-558db77b4-ggrbw\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.744880 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.749452 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6lsx\" (UniqueName: \"kubernetes.io/projected/6069f0ea-bc74-4858-a564-156202fbe36d-kube-api-access-d6lsx\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.755281 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.768652 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.780295 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfvp5\" (UniqueName: \"kubernetes.io/projected/f3cae218-7d6a-42c0-a269-ad8d27dfed75-kube-api-access-dfvp5\") pod \"openshift-config-operator-7777fb866f-9rc8w\" (UID: \"f3cae218-7d6a-42c0-a269-ad8d27dfed75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.792306 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shtvf\" (UniqueName: \"kubernetes.io/projected/576691ca-af69-46de-b8c2-5b2195e3db0b-kube-api-access-shtvf\") pod \"machine-api-operator-5694c8668f-w26jv\" (UID: \"576691ca-af69-46de-b8c2-5b2195e3db0b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.808775 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6069f0ea-bc74-4858-a564-156202fbe36d-bound-sa-token\") pod \"ingress-operator-5b745b69d9-p6f2d\" (UID: \"6069f0ea-bc74-4858-a564-156202fbe36d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.829032 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktl5t\" (UniqueName: \"kubernetes.io/projected/aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd-kube-api-access-ktl5t\") pod \"authentication-operator-69f744f599-9jc2h\" (UID: \"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.847568 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrqqk\" (UniqueName: \"kubernetes.io/projected/04f53da9-c9c4-4af6-a8e1-37e91549a81a-kube-api-access-jrqqk\") pod \"apiserver-76f77b778f-dn6hh\" (UID: \"04f53da9-c9c4-4af6-a8e1-37e91549a81a\") " pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.868052 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcbq8\" (UniqueName: \"kubernetes.io/projected/12eb63da-c25d-4d6a-84e5-070b0c9bd1aa-kube-api-access-kcbq8\") pod \"cluster-samples-operator-665b6dd947-jpq5h\" (UID: \"12eb63da-c25d-4d6a-84e5-070b0c9bd1aa\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 
13:44:59.887517 5133 request.go:700] Waited for 1.954140733s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-cluster-machine-approver/serviceaccounts/machine-approver-sa/token Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.905809 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkl5p\" (UniqueName: \"kubernetes.io/projected/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-kube-api-access-jkl5p\") pod \"console-f9d7485db-m54n8\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.909080 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jqz6\" (UniqueName: \"kubernetes.io/projected/f7d3e23a-de5e-4562-9142-16955fe746ad-kube-api-access-9jqz6\") pod \"machine-approver-56656f9798-65hwm\" (UID: \"f7d3e23a-de5e-4562-9142-16955fe746ad\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.909680 5133 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.923178 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5pm4x"] Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.928927 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.944524 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.950224 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.953200 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.965190 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.971491 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.980725 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.987695 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" Nov 21 13:44:59 crc kubenswrapper[5133]: I1121 13:44:59.989937 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.005602 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.010164 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.029290 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.050100 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.050488 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.060189 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.068760 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.077511 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.085333 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.101861 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.126354 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnxp6\" (UniqueName: \"kubernetes.io/projected/a06ef925-e180-4dd7-b57f-3d3017be6bd0-kube-api-access-rnxp6\") pod \"package-server-manager-789f6589d5-bhlrh\" (UID: \"a06ef925-e180-4dd7-b57f-3d3017be6bd0\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.143789 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dbqh\" (UniqueName: \"kubernetes.io/projected/5686b316-695d-4eab-a619-1033b90d6d96-kube-api-access-2dbqh\") pod \"control-plane-machine-set-operator-78cbb6b69f-sspkx\" (UID: \"5686b316-695d-4eab-a619-1033b90d6d96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.155850 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.161530 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll"] Nov 21 13:45:00 crc kubenswrapper[5133]: E1121 13:45:00.166608 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config-volume kube-api-access-sz95q secret-volume], unattached volumes=[], failed to process volumes=[config-volume kube-api-access-sz95q secret-volume]: context canceled" pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" podUID="0da7ec78-a448-44eb-b829-20cf97a0bbcf" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.168820 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsp84\" (UniqueName: \"kubernetes.io/projected/8bbf68aa-448e-453c-8df2-839594103920-kube-api-access-gsp84\") pod \"marketplace-operator-79b997595-zd8tq\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.187196 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.192316 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.195356 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.196425 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.201774 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m65sl\" (UniqueName: \"kubernetes.io/projected/9c99dec9-4fc0-4576-81e4-703a690f4f45-kube-api-access-m65sl\") pod \"machine-config-operator-74547568cd-lz2px\" (UID: \"9c99dec9-4fc0-4576-81e4-703a690f4f45\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.204564 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7jhbl"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.210594 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gmp9\" (UniqueName: \"kubernetes.io/projected/f49021c5-9619-47e6-a758-d800793dae08-kube-api-access-5gmp9\") pod \"etcd-operator-b45778765-6fn5b\" (UID: \"f49021c5-9619-47e6-a758-d800793dae08\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.232743 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq2xb\" (UniqueName: \"kubernetes.io/projected/589fc21a-acb4-4311-9e79-f9ad27c6f187-kube-api-access-fq2xb\") pod \"ingress-canary-9mtt2\" (UID: \"589fc21a-acb4-4311-9e79-f9ad27c6f187\") " pod="openshift-ingress-canary/ingress-canary-9mtt2" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.248213 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/e7a05b22-40d7-44b3-ab76-5b1548e042ba-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-mghzk\" (UID: \"e7a05b22-40d7-44b3-ab76-5b1548e042ba\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.253382 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llzdq\" (UniqueName: \"kubernetes.io/projected/b647f6d6-6425-43f9-9e70-3a66dee6f37c-kube-api-access-llzdq\") pod \"machine-config-controller-84d6567774-mp878\" (UID: \"b647f6d6-6425-43f9-9e70-3a66dee6f37c\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.279280 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ghdt\" (UniqueName: \"kubernetes.io/projected/dc4b9d28-3c54-466b-b41d-0f1381490b02-kube-api-access-8ghdt\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.279780 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e90fcb6-b3a1-4545-8d65-c9e13a61828b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-q7g4h\" (UID: \"9e90fcb6-b3a1-4545-8d65-c9e13a61828b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.279810 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc4b9d28-3c54-466b-b41d-0f1381490b02-service-ca-bundle\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.279896 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sn4bt\" (UniqueName: \"kubernetes.io/projected/5c6271e9-d951-4d54-9db4-1b1df5c9d3f1-kube-api-access-sn4bt\") pod \"dns-operator-744455d44c-ntz66\" (UID: \"5c6271e9-d951-4d54-9db4-1b1df5c9d3f1\") " pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.279947 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e90fcb6-b3a1-4545-8d65-c9e13a61828b-config\") pod \"kube-controller-manager-operator-78b949d7b-q7g4h\" (UID: \"9e90fcb6-b3a1-4545-8d65-c9e13a61828b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.279970 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0da7ec78-a448-44eb-b829-20cf97a0bbcf-config-volume\") pod \"collect-profiles-29395530-jlnll\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280087 5133 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghzsh\" (UniqueName: \"kubernetes.io/projected/f1a068fb-7569-4abd-a6cc-1eef000fd386-kube-api-access-ghzsh\") pod \"olm-operator-6b444d44fb-f9kbn\" (UID: \"f1a068fb-7569-4abd-a6cc-1eef000fd386\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280129 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9kwz\" (UID: \"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280172 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x9mk\" (UniqueName: \"kubernetes.io/projected/8ccfd073-ef60-4c22-a8f9-80c50ed1ca42-kube-api-access-9x9mk\") pod \"multus-admission-controller-857f4d67dd-ddmrq\" (UID: \"8ccfd073-ef60-4c22-a8f9-80c50ed1ca42\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280234 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/e38db8a2-e4d9-49c7-9f87-81518e834b23-tmpfs\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280259 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e38db8a2-e4d9-49c7-9f87-81518e834b23-webhook-cert\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280282 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7fb3acb-7ca2-40c1-bffa-beb98c676906-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-7jqbx\" (UID: \"d7fb3acb-7ca2-40c1-bffa-beb98c676906\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280320 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f1a068fb-7569-4abd-a6cc-1eef000fd386-profile-collector-cert\") pod \"olm-operator-6b444d44fb-f9kbn\" (UID: \"f1a068fb-7569-4abd-a6cc-1eef000fd386\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280339 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cc9dbfec-ae22-44bf-9464-1be9c6e20833-profile-collector-cert\") pod \"catalog-operator-68c6474976-sjflw\" (UID: \"cc9dbfec-ae22-44bf-9464-1be9c6e20833\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc 
kubenswrapper[5133]: I1121 13:45:00.280389 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-certificates\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280407 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/89d95e05-f94e-4397-9cf2-54c67401b6e2-signing-key\") pod \"service-ca-9c57cc56f-zfc95\" (UID: \"89d95e05-f94e-4397-9cf2-54c67401b6e2\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280433 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7fb3acb-7ca2-40c1-bffa-beb98c676906-config\") pod \"kube-apiserver-operator-766d6c64bb-7jqbx\" (UID: \"d7fb3acb-7ca2-40c1-bffa-beb98c676906\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280500 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/5c6271e9-d951-4d54-9db4-1b1df5c9d3f1-metrics-tls\") pod \"dns-operator-744455d44c-ntz66\" (UID: \"5c6271e9-d951-4d54-9db4-1b1df5c9d3f1\") " pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280546 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cc9dbfec-ae22-44bf-9464-1be9c6e20833-srv-cert\") pod \"catalog-operator-68c6474976-sjflw\" (UID: \"cc9dbfec-ae22-44bf-9464-1be9c6e20833\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280588 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280606 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzjft\" (UniqueName: \"kubernetes.io/projected/3923f6ff-3c6d-42d8-8000-91004cf58dd4-kube-api-access-pzjft\") pod \"migrator-59844c95c7-dvnq8\" (UID: \"3923f6ff-3c6d-42d8-8000-91004cf58dd4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280636 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcrtn\" (UniqueName: \"kubernetes.io/projected/89d95e05-f94e-4397-9cf2-54c67401b6e2-kube-api-access-vcrtn\") pod \"service-ca-9c57cc56f-zfc95\" (UID: \"89d95e05-f94e-4397-9cf2-54c67401b6e2\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280653 5133 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9kwz\" (UID: \"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280680 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e90fcb6-b3a1-4545-8d65-c9e13a61828b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-q7g4h\" (UID: \"9e90fcb6-b3a1-4545-8d65-c9e13a61828b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280706 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e38db8a2-e4d9-49c7-9f87-81518e834b23-apiservice-cert\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280724 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/dc4b9d28-3c54-466b-b41d-0f1381490b02-stats-auth\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280740 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfcb9\" (UniqueName: \"kubernetes.io/projected/4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a-kube-api-access-lfcb9\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9kwz\" (UID: \"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280762 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc3cef5b-e125-43c6-be9e-52ae6617e01a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280779 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0da7ec78-a448-44eb-b829-20cf97a0bbcf-secret-volume\") pod \"collect-profiles-29395530-jlnll\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280800 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ccfd073-ef60-4c22-a8f9-80c50ed1ca42-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-ddmrq\" (UID: \"8ccfd073-ef60-4c22-a8f9-80c50ed1ca42\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280828 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-bound-sa-token\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280858 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f1a068fb-7569-4abd-a6cc-1eef000fd386-srv-cert\") pod \"olm-operator-6b444d44fb-f9kbn\" (UID: \"f1a068fb-7569-4abd-a6cc-1eef000fd386\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280879 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtn6b\" (UniqueName: \"kubernetes.io/projected/e38db8a2-e4d9-49c7-9f87-81518e834b23-kube-api-access-wtn6b\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.280944 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-tls\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.281076 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq6sb\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-kube-api-access-gq6sb\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.281128 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/89d95e05-f94e-4397-9cf2-54c67401b6e2-signing-cabundle\") pod \"service-ca-9c57cc56f-zfc95\" (UID: \"89d95e05-f94e-4397-9cf2-54c67401b6e2\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.281150 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dc4b9d28-3c54-466b-b41d-0f1381490b02-metrics-certs\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.281216 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-trusted-ca\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 
13:45:00.281259 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc3cef5b-e125-43c6-be9e-52ae6617e01a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.281283 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7fb3acb-7ca2-40c1-bffa-beb98c676906-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-7jqbx\" (UID: \"d7fb3acb-7ca2-40c1-bffa-beb98c676906\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.281334 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6zl7\" (UniqueName: \"kubernetes.io/projected/cc9dbfec-ae22-44bf-9464-1be9c6e20833-kube-api-access-j6zl7\") pod \"catalog-operator-68c6474976-sjflw\" (UID: \"cc9dbfec-ae22-44bf-9464-1be9c6e20833\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.281364 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/dc4b9d28-3c54-466b-b41d-0f1381490b02-default-certificate\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.281785 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz95q\" (UniqueName: \"kubernetes.io/projected/0da7ec78-a448-44eb-b829-20cf97a0bbcf-kube-api-access-sz95q\") pod \"collect-profiles-29395530-jlnll\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.283252 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-9mtt2" Nov 21 13:45:00 crc kubenswrapper[5133]: E1121 13:45:00.283451 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:00.783435186 +0000 UTC m=+160.581267434 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.332240 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ggrbw"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.358544 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382465 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382620 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x9mk\" (UniqueName: \"kubernetes.io/projected/8ccfd073-ef60-4c22-a8f9-80c50ed1ca42-kube-api-access-9x9mk\") pod \"multus-admission-controller-857f4d67dd-ddmrq\" (UID: \"8ccfd073-ef60-4c22-a8f9-80c50ed1ca42\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382646 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv2qd\" (UniqueName: \"kubernetes.io/projected/5ff7be36-18ee-41ff-b8b6-4ea13387d7f2-kube-api-access-hv2qd\") pod \"dns-default-9qx4w\" (UID: \"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2\") " pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382675 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/e38db8a2-e4d9-49c7-9f87-81518e834b23-tmpfs\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382690 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e38db8a2-e4d9-49c7-9f87-81518e834b23-webhook-cert\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382716 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7fb3acb-7ca2-40c1-bffa-beb98c676906-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-7jqbx\" (UID: \"d7fb3acb-7ca2-40c1-bffa-beb98c676906\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382734 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/5ff7be36-18ee-41ff-b8b6-4ea13387d7f2-config-volume\") pod \"dns-default-9qx4w\" (UID: \"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2\") " pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382771 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f1a068fb-7569-4abd-a6cc-1eef000fd386-profile-collector-cert\") pod \"olm-operator-6b444d44fb-f9kbn\" (UID: \"f1a068fb-7569-4abd-a6cc-1eef000fd386\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382787 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cc9dbfec-ae22-44bf-9464-1be9c6e20833-profile-collector-cert\") pod \"catalog-operator-68c6474976-sjflw\" (UID: \"cc9dbfec-ae22-44bf-9464-1be9c6e20833\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382812 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/89d95e05-f94e-4397-9cf2-54c67401b6e2-signing-key\") pod \"service-ca-9c57cc56f-zfc95\" (UID: \"89d95e05-f94e-4397-9cf2-54c67401b6e2\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382840 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-certificates\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382863 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7fb3acb-7ca2-40c1-bffa-beb98c676906-config\") pod \"kube-apiserver-operator-766d6c64bb-7jqbx\" (UID: \"d7fb3acb-7ca2-40c1-bffa-beb98c676906\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382889 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/5c6271e9-d951-4d54-9db4-1b1df5c9d3f1-metrics-tls\") pod \"dns-operator-744455d44c-ntz66\" (UID: \"5c6271e9-d951-4d54-9db4-1b1df5c9d3f1\") " pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382922 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cc9dbfec-ae22-44bf-9464-1be9c6e20833-srv-cert\") pod \"catalog-operator-68c6474976-sjflw\" (UID: \"cc9dbfec-ae22-44bf-9464-1be9c6e20833\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.382966 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzjft\" (UniqueName: \"kubernetes.io/projected/3923f6ff-3c6d-42d8-8000-91004cf58dd4-kube-api-access-pzjft\") pod \"migrator-59844c95c7-dvnq8\" (UID: \"3923f6ff-3c6d-42d8-8000-91004cf58dd4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8" Nov 21 13:45:00 crc 
kubenswrapper[5133]: I1121 13:45:00.382981 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcrtn\" (UniqueName: \"kubernetes.io/projected/89d95e05-f94e-4397-9cf2-54c67401b6e2-kube-api-access-vcrtn\") pod \"service-ca-9c57cc56f-zfc95\" (UID: \"89d95e05-f94e-4397-9cf2-54c67401b6e2\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383050 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9kwz\" (UID: \"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383077 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e90fcb6-b3a1-4545-8d65-c9e13a61828b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-q7g4h\" (UID: \"9e90fcb6-b3a1-4545-8d65-c9e13a61828b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383094 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e38db8a2-e4d9-49c7-9f87-81518e834b23-apiservice-cert\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383110 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a84abf7e-43b7-4483-a08e-270294215dff-node-bootstrap-token\") pod \"machine-config-server-m4jcp\" (UID: \"a84abf7e-43b7-4483-a08e-270294215dff\") " pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383129 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/dc4b9d28-3c54-466b-b41d-0f1381490b02-stats-auth\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383146 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfcb9\" (UniqueName: \"kubernetes.io/projected/4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a-kube-api-access-lfcb9\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9kwz\" (UID: \"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383165 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-csi-data-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383220 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc3cef5b-e125-43c6-be9e-52ae6617e01a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383238 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0da7ec78-a448-44eb-b829-20cf97a0bbcf-secret-volume\") pod \"collect-profiles-29395530-jlnll\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383254 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af22fa12-851f-4ec2-81f1-b3df1186e00c-config-volume\") pod \"collect-profiles-29395545-wrdtj\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383270 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xk828\" (UniqueName: \"kubernetes.io/projected/af22fa12-851f-4ec2-81f1-b3df1186e00c-kube-api-access-xk828\") pod \"collect-profiles-29395545-wrdtj\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383289 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-bound-sa-token\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383304 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/8ccfd073-ef60-4c22-a8f9-80c50ed1ca42-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-ddmrq\" (UID: \"8ccfd073-ef60-4c22-a8f9-80c50ed1ca42\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383381 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f1a068fb-7569-4abd-a6cc-1eef000fd386-srv-cert\") pod \"olm-operator-6b444d44fb-f9kbn\" (UID: \"f1a068fb-7569-4abd-a6cc-1eef000fd386\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383398 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtn6b\" (UniqueName: \"kubernetes.io/projected/e38db8a2-e4d9-49c7-9f87-81518e834b23-kube-api-access-wtn6b\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383423 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-tls\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383448 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq6sb\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-kube-api-access-gq6sb\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383471 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/89d95e05-f94e-4397-9cf2-54c67401b6e2-signing-cabundle\") pod \"service-ca-9c57cc56f-zfc95\" (UID: \"89d95e05-f94e-4397-9cf2-54c67401b6e2\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383488 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dc4b9d28-3c54-466b-b41d-0f1381490b02-metrics-certs\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383514 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-trusted-ca\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383597 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc3cef5b-e125-43c6-be9e-52ae6617e01a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383617 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7fb3acb-7ca2-40c1-bffa-beb98c676906-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-7jqbx\" (UID: \"d7fb3acb-7ca2-40c1-bffa-beb98c676906\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383634 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gs9sh\" (UniqueName: \"kubernetes.io/projected/a84abf7e-43b7-4483-a08e-270294215dff-kube-api-access-gs9sh\") pod \"machine-config-server-m4jcp\" (UID: \"a84abf7e-43b7-4483-a08e-270294215dff\") " pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383677 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6zl7\" (UniqueName: \"kubernetes.io/projected/cc9dbfec-ae22-44bf-9464-1be9c6e20833-kube-api-access-j6zl7\") pod \"catalog-operator-68c6474976-sjflw\" (UID: \"cc9dbfec-ae22-44bf-9464-1be9c6e20833\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383692 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/af22fa12-851f-4ec2-81f1-b3df1186e00c-secret-volume\") pod \"collect-profiles-29395545-wrdtj\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383744 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/dc4b9d28-3c54-466b-b41d-0f1381490b02-default-certificate\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383760 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-mountpoint-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383776 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sz95q\" (UniqueName: \"kubernetes.io/projected/0da7ec78-a448-44eb-b829-20cf97a0bbcf-kube-api-access-sz95q\") pod \"collect-profiles-29395530-jlnll\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383802 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ghdt\" (UniqueName: \"kubernetes.io/projected/dc4b9d28-3c54-466b-b41d-0f1381490b02-kube-api-access-8ghdt\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383827 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e90fcb6-b3a1-4545-8d65-c9e13a61828b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-q7g4h\" (UID: \"9e90fcb6-b3a1-4545-8d65-c9e13a61828b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383852 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc4b9d28-3c54-466b-b41d-0f1381490b02-service-ca-bundle\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383866 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a84abf7e-43b7-4483-a08e-270294215dff-certs\") pod \"machine-config-server-m4jcp\" (UID: \"a84abf7e-43b7-4483-a08e-270294215dff\") " pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383882 5133 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqbbn\" (UniqueName: \"kubernetes.io/projected/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-kube-api-access-mqbbn\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383939 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sn4bt\" (UniqueName: \"kubernetes.io/projected/5c6271e9-d951-4d54-9db4-1b1df5c9d3f1-kube-api-access-sn4bt\") pod \"dns-operator-744455d44c-ntz66\" (UID: \"5c6271e9-d951-4d54-9db4-1b1df5c9d3f1\") " pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383956 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e90fcb6-b3a1-4545-8d65-c9e13a61828b-config\") pod \"kube-controller-manager-operator-78b949d7b-q7g4h\" (UID: \"9e90fcb6-b3a1-4545-8d65-c9e13a61828b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.383983 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0da7ec78-a448-44eb-b829-20cf97a0bbcf-config-volume\") pod \"collect-profiles-29395530-jlnll\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.384088 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-registration-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.384117 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/5ff7be36-18ee-41ff-b8b6-4ea13387d7f2-metrics-tls\") pod \"dns-default-9qx4w\" (UID: \"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2\") " pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.384146 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghzsh\" (UniqueName: \"kubernetes.io/projected/f1a068fb-7569-4abd-a6cc-1eef000fd386-kube-api-access-ghzsh\") pod \"olm-operator-6b444d44fb-f9kbn\" (UID: \"f1a068fb-7569-4abd-a6cc-1eef000fd386\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.384171 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9kwz\" (UID: \"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.384187 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: 
\"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-socket-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.384201 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-plugins-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: E1121 13:45:00.384444 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:00.884416017 +0000 UTC m=+160.682248405 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.385318 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/e38db8a2-e4d9-49c7-9f87-81518e834b23-tmpfs\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.391411 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dc4b9d28-3c54-466b-b41d-0f1381490b02-service-ca-bundle\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.394186 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/e38db8a2-e4d9-49c7-9f87-81518e834b23-webhook-cert\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.412430 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/dc4b9d28-3c54-466b-b41d-0f1381490b02-default-certificate\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.415639 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e90fcb6-b3a1-4545-8d65-c9e13a61828b-config\") pod \"kube-controller-manager-operator-78b949d7b-q7g4h\" (UID: \"9e90fcb6-b3a1-4545-8d65-c9e13a61828b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 
13:45:00.415861 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9kwz\" (UID: \"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.416031 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9e90fcb6-b3a1-4545-8d65-c9e13a61828b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-q7g4h\" (UID: \"9e90fcb6-b3a1-4545-8d65-c9e13a61828b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.416427 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc3cef5b-e125-43c6-be9e-52ae6617e01a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.417350 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f1a068fb-7569-4abd-a6cc-1eef000fd386-srv-cert\") pod \"olm-operator-6b444d44fb-f9kbn\" (UID: \"f1a068fb-7569-4abd-a6cc-1eef000fd386\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.417773 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/dc4b9d28-3c54-466b-b41d-0f1381490b02-metrics-certs\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.418450 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0da7ec78-a448-44eb-b829-20cf97a0bbcf-config-volume\") pod \"collect-profiles-29395530-jlnll\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.419124 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9kwz\" (UID: \"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.420483 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7jhbl" event={"ID":"bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8","Type":"ContainerStarted","Data":"fdd7d8f6409b16d81e730da5005bba7f2cfb0c89b6b9cc85e9bf5557e1010fc6"} Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.426921 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f1a068fb-7569-4abd-a6cc-1eef000fd386-profile-collector-cert\") pod \"olm-operator-6b444d44fb-f9kbn\" (UID: 
\"f1a068fb-7569-4abd-a6cc-1eef000fd386\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.433067 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-certificates\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.433924 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7fb3acb-7ca2-40c1-bffa-beb98c676906-config\") pod \"kube-apiserver-operator-766d6c64bb-7jqbx\" (UID: \"d7fb3acb-7ca2-40c1-bffa-beb98c676906\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.434693 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cc9dbfec-ae22-44bf-9464-1be9c6e20833-srv-cert\") pod \"catalog-operator-68c6474976-sjflw\" (UID: \"cc9dbfec-ae22-44bf-9464-1be9c6e20833\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.435269 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cc9dbfec-ae22-44bf-9464-1be9c6e20833-profile-collector-cert\") pod \"catalog-operator-68c6474976-sjflw\" (UID: \"cc9dbfec-ae22-44bf-9464-1be9c6e20833\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.438437 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc3cef5b-e125-43c6-be9e-52ae6617e01a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.438502 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/5c6271e9-d951-4d54-9db4-1b1df5c9d3f1-metrics-tls\") pod \"dns-operator-744455d44c-ntz66\" (UID: \"5c6271e9-d951-4d54-9db4-1b1df5c9d3f1\") " pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.439369 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/dc4b9d28-3c54-466b-b41d-0f1381490b02-stats-auth\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.442305 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" event={"ID":"dfcd7a67-a498-4c5b-82e9-c998a388b652","Type":"ContainerStarted","Data":"2a449e51ab5210e73bfed0b64dbe60fba8e0249ec42d3108d72899bd4cc35385"} Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.443160 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.443281 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-trusted-ca\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.445596 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7fb3acb-7ca2-40c1-bffa-beb98c676906-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-7jqbx\" (UID: \"d7fb3acb-7ca2-40c1-bffa-beb98c676906\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.446278 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.446586 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.449242 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/89d95e05-f94e-4397-9cf2-54c67401b6e2-signing-cabundle\") pod \"service-ca-9c57cc56f-zfc95\" (UID: \"89d95e05-f94e-4397-9cf2-54c67401b6e2\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.452039 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0da7ec78-a448-44eb-b829-20cf97a0bbcf-secret-volume\") pod \"collect-profiles-29395530-jlnll\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.457722 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/e38db8a2-e4d9-49c7-9f87-81518e834b23-apiservice-cert\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.458754 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/89d95e05-f94e-4397-9cf2-54c67401b6e2-signing-key\") pod \"service-ca-9c57cc56f-zfc95\" (UID: \"89d95e05-f94e-4397-9cf2-54c67401b6e2\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.458806 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-tls\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.460812 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/8ccfd073-ef60-4c22-a8f9-80c50ed1ca42-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-ddmrq\" (UID: \"8ccfd073-ef60-4c22-a8f9-80c50ed1ca42\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.489590 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtn6b\" (UniqueName: \"kubernetes.io/projected/e38db8a2-e4d9-49c7-9f87-81518e834b23-kube-api-access-wtn6b\") pod \"packageserver-d55dfcdfc-p929b\" (UID: \"e38db8a2-e4d9-49c7-9f87-81518e834b23\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.491861 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7fb3acb-7ca2-40c1-bffa-beb98c676906-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-7jqbx\" (UID: \"d7fb3acb-7ca2-40c1-bffa-beb98c676906\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.492151 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.493955 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a84abf7e-43b7-4483-a08e-270294215dff-certs\") pod \"machine-config-server-m4jcp\" (UID: \"a84abf7e-43b7-4483-a08e-270294215dff\") " pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.493978 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqbbn\" (UniqueName: \"kubernetes.io/projected/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-kube-api-access-mqbbn\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494041 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-registration-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494058 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/5ff7be36-18ee-41ff-b8b6-4ea13387d7f2-metrics-tls\") pod \"dns-default-9qx4w\" (UID: \"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2\") " pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494082 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-socket-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494096 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-plugins-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: 
\"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494119 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv2qd\" (UniqueName: \"kubernetes.io/projected/5ff7be36-18ee-41ff-b8b6-4ea13387d7f2-kube-api-access-hv2qd\") pod \"dns-default-9qx4w\" (UID: \"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2\") " pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494135 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5ff7be36-18ee-41ff-b8b6-4ea13387d7f2-config-volume\") pod \"dns-default-9qx4w\" (UID: \"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2\") " pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494174 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494213 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a84abf7e-43b7-4483-a08e-270294215dff-node-bootstrap-token\") pod \"machine-config-server-m4jcp\" (UID: \"a84abf7e-43b7-4483-a08e-270294215dff\") " pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494237 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-csi-data-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494255 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af22fa12-851f-4ec2-81f1-b3df1186e00c-config-volume\") pod \"collect-profiles-29395545-wrdtj\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494271 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xk828\" (UniqueName: \"kubernetes.io/projected/af22fa12-851f-4ec2-81f1-b3df1186e00c-kube-api-access-xk828\") pod \"collect-profiles-29395545-wrdtj\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494309 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gs9sh\" (UniqueName: \"kubernetes.io/projected/a84abf7e-43b7-4483-a08e-270294215dff-kube-api-access-gs9sh\") pod \"machine-config-server-m4jcp\" (UID: \"a84abf7e-43b7-4483-a08e-270294215dff\") " pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494334 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" 
(UniqueName: \"kubernetes.io/secret/af22fa12-851f-4ec2-81f1-b3df1186e00c-secret-volume\") pod \"collect-profiles-29395545-wrdtj\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494351 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-mountpoint-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.494428 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-mountpoint-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.495925 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5ff7be36-18ee-41ff-b8b6-4ea13387d7f2-config-volume\") pod \"dns-default-9qx4w\" (UID: \"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2\") " pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.496565 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-registration-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.497565 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x9mk\" (UniqueName: \"kubernetes.io/projected/8ccfd073-ef60-4c22-a8f9-80c50ed1ca42-kube-api-access-9x9mk\") pod \"multus-admission-controller-857f4d67dd-ddmrq\" (UID: \"8ccfd073-ef60-4c22-a8f9-80c50ed1ca42\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.497838 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-plugins-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.497890 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-socket-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.501753 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-csi-data-dir\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.504568 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/5ff7be36-18ee-41ff-b8b6-4ea13387d7f2-metrics-tls\") pod \"dns-default-9qx4w\" (UID: \"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2\") " pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:00 crc kubenswrapper[5133]: E1121 13:45:00.504707 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.004680384 +0000 UTC m=+160.802512822 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.508250 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.509761 5133 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5pm4x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.509794 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" podUID="e2755732-9869-46c9-be0e-e1fc77aa6644" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.510324 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.518706 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.523526 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/af22fa12-851f-4ec2-81f1-b3df1186e00c-secret-volume\") pod \"collect-profiles-29395545-wrdtj\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.526532 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a84abf7e-43b7-4483-a08e-270294215dff-certs\") pod \"machine-config-server-m4jcp\" (UID: \"a84abf7e-43b7-4483-a08e-270294215dff\") " pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.526830 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a84abf7e-43b7-4483-a08e-270294215dff-node-bootstrap-token\") pod \"machine-config-server-m4jcp\" (UID: \"a84abf7e-43b7-4483-a08e-270294215dff\") " pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.532108 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af22fa12-851f-4ec2-81f1-b3df1186e00c-config-volume\") pod \"collect-profiles-29395545-wrdtj\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.535745 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz95q\" (UniqueName: \"kubernetes.io/projected/0da7ec78-a448-44eb-b829-20cf97a0bbcf-kube-api-access-sz95q\") pod \"collect-profiles-29395530-jlnll\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.537777 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ghdt\" (UniqueName: \"kubernetes.io/projected/dc4b9d28-3c54-466b-b41d-0f1381490b02-kube-api-access-8ghdt\") pod \"router-default-5444994796-x7zf8\" (UID: \"dc4b9d28-3c54-466b-b41d-0f1381490b02\") " pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.541129 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghzsh\" (UniqueName: \"kubernetes.io/projected/f1a068fb-7569-4abd-a6cc-1eef000fd386-kube-api-access-ghzsh\") pod \"olm-operator-6b444d44fb-f9kbn\" (UID: \"f1a068fb-7569-4abd-a6cc-1eef000fd386\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.544469 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.567788 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sn4bt\" (UniqueName: \"kubernetes.io/projected/5c6271e9-d951-4d54-9db4-1b1df5c9d3f1-kube-api-access-sn4bt\") pod \"dns-operator-744455d44c-ntz66\" (UID: \"5c6271e9-d951-4d54-9db4-1b1df5c9d3f1\") " pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.568598 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6zl7\" (UniqueName: \"kubernetes.io/projected/cc9dbfec-ae22-44bf-9464-1be9c6e20833-kube-api-access-j6zl7\") pod \"catalog-operator-68c6474976-sjflw\" (UID: \"cc9dbfec-ae22-44bf-9464-1be9c6e20833\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.570982 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.572620 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" event={"ID":"e9fdda75-f32d-445e-9658-d135d0a548ae","Type":"ContainerStarted","Data":"843c2877ed11106f5c9b8cf99fffe4e02ff3434676cb50b8fa4d943b6bd65142"} Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.572675 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" event={"ID":"e2755732-9869-46c9-be0e-e1fc77aa6644","Type":"ContainerStarted","Data":"da0645a2ed5de0ca552a83c927ae4d00e796fcfecb2572ceb222a87c04bb2539"} Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.572709 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.572722 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" event={"ID":"e2755732-9869-46c9-be0e-e1fc77aa6644","Type":"ContainerStarted","Data":"14e7b6bdc3eabc2c5b05eaca846925bc87e90895b58574bf56e750b5d80a81c2"} Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.593536 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.601927 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0da7ec78-a448-44eb-b829-20cf97a0bbcf-secret-volume\") pod \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.602430 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sz95q\" (UniqueName: \"kubernetes.io/projected/0da7ec78-a448-44eb-b829-20cf97a0bbcf-kube-api-access-sz95q\") pod \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.602671 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.602703 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0da7ec78-a448-44eb-b829-20cf97a0bbcf-config-volume\") pod \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\" (UID: \"0da7ec78-a448-44eb-b829-20cf97a0bbcf\") " Nov 21 13:45:00 crc kubenswrapper[5133]: E1121 13:45:00.612789 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.112714384 +0000 UTC m=+160.910546772 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.614539 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0da7ec78-a448-44eb-b829-20cf97a0bbcf-config-volume" (OuterVolumeSpecName: "config-volume") pod "0da7ec78-a448-44eb-b829-20cf97a0bbcf" (UID: "0da7ec78-a448-44eb-b829-20cf97a0bbcf"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.617252 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq6sb\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-kube-api-access-gq6sb\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.622137 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0da7ec78-a448-44eb-b829-20cf97a0bbcf-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0da7ec78-a448-44eb-b829-20cf97a0bbcf" (UID: "0da7ec78-a448-44eb-b829-20cf97a0bbcf"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.624264 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0da7ec78-a448-44eb-b829-20cf97a0bbcf-kube-api-access-sz95q" (OuterVolumeSpecName: "kube-api-access-sz95q") pod "0da7ec78-a448-44eb-b829-20cf97a0bbcf" (UID: "0da7ec78-a448-44eb-b829-20cf97a0bbcf"). InnerVolumeSpecName "kube-api-access-sz95q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.634142 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.634206 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-9jc2h"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.637059 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dn6hh"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.642886 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-w26jv"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.650344 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-z98jb"] Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.661980 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcrtn\" (UniqueName: \"kubernetes.io/projected/89d95e05-f94e-4397-9cf2-54c67401b6e2-kube-api-access-vcrtn\") pod \"service-ca-9c57cc56f-zfc95\" (UID: \"89d95e05-f94e-4397-9cf2-54c67401b6e2\") " pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.663024 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9e90fcb6-b3a1-4545-8d65-c9e13a61828b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-q7g4h\" (UID: \"9e90fcb6-b3a1-4545-8d65-c9e13a61828b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.676928 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzjft\" (UniqueName: \"kubernetes.io/projected/3923f6ff-3c6d-42d8-8000-91004cf58dd4-kube-api-access-pzjft\") pod \"migrator-59844c95c7-dvnq8\" (UID: \"3923f6ff-3c6d-42d8-8000-91004cf58dd4\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.689451 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfcb9\" (UniqueName: \"kubernetes.io/projected/4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a-kube-api-access-lfcb9\") pod \"kube-storage-version-migrator-operator-b67b599dd-j9kwz\" (UID: \"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.692487 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.708431 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.712688 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.712996 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sz95q\" (UniqueName: \"kubernetes.io/projected/0da7ec78-a448-44eb-b829-20cf97a0bbcf-kube-api-access-sz95q\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.713206 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0da7ec78-a448-44eb-b829-20cf97a0bbcf-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:00 crc kubenswrapper[5133]: E1121 13:45:00.713531 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.21351379 +0000 UTC m=+161.011346038 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.713826 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0da7ec78-a448-44eb-b829-20cf97a0bbcf-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.713883 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-bound-sa-token\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.716862 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.723613 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.732314 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqbbn\" (UniqueName: \"kubernetes.io/projected/63aa8ca7-9e72-4f3c-85e9-b9a423371b21-kube-api-access-mqbbn\") pod \"csi-hostpathplugin-9zjjz\" (UID: \"63aa8ca7-9e72-4f3c-85e9-b9a423371b21\") " pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.739474 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.777858 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hv2qd\" (UniqueName: \"kubernetes.io/projected/5ff7be36-18ee-41ff-b8b6-4ea13387d7f2-kube-api-access-hv2qd\") pod \"dns-default-9qx4w\" (UID: \"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2\") " pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.793243 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.801483 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.810646 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gs9sh\" (UniqueName: \"kubernetes.io/projected/a84abf7e-43b7-4483-a08e-270294215dff-kube-api-access-gs9sh\") pod \"machine-config-server-m4jcp\" (UID: \"a84abf7e-43b7-4483-a08e-270294215dff\") " pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.810650 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.815672 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:00 crc kubenswrapper[5133]: E1121 13:45:00.817062 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.31704047 +0000 UTC m=+161.114872718 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.824736 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.829802 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xk828\" (UniqueName: \"kubernetes.io/projected/af22fa12-851f-4ec2-81f1-b3df1186e00c-kube-api-access-xk828\") pod \"collect-profiles-29395545-wrdtj\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.838391 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.918275 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:00 crc kubenswrapper[5133]: E1121 13:45:00.918750 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.41873255 +0000 UTC m=+161.216564798 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.925670 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.926551 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.982838 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" Nov 21 13:45:00 crc kubenswrapper[5133]: I1121 13:45:00.992069 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.003944 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-m4jcp" Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.021602 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.021789 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.521759166 +0000 UTC m=+161.319591424 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.022118 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.022516 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.522502396 +0000 UTC m=+161.320334644 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.083265 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-9mtt2"] Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.117216 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d"] Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.118626 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx"] Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.138190 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.138732 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.638709014 +0000 UTC m=+161.436541262 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.147216 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-m54n8"] Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.240069 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.240838 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.740825706 +0000 UTC m=+161.538657954 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: W1121 13:45:01.259243 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5686b316_695d_4eab_a619_1033b90d6d96.slice/crio-fddb8795aee9bec36ea51da08a295d1e92c83603270cbec97049a3989669d920 WatchSource:0}: Error finding container fddb8795aee9bec36ea51da08a295d1e92c83603270cbec97049a3989669d920: Status 404 returned error can't find the container with id fddb8795aee9bec36ea51da08a295d1e92c83603270cbec97049a3989669d920 Nov 21 13:45:01 crc kubenswrapper[5133]: W1121 13:45:01.261347 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6069f0ea_bc74_4858_a564_156202fbe36d.slice/crio-df8f297beaf499a7bc339f20476e82749790145c2fc2a29eefbad1c2aba2b0df WatchSource:0}: Error finding container df8f297beaf499a7bc339f20476e82749790145c2fc2a29eefbad1c2aba2b0df: Status 404 returned error can't find the container with id df8f297beaf499a7bc339f20476e82749790145c2fc2a29eefbad1c2aba2b0df Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.322012 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-mp878"] Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.341541 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.341854 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.841792047 +0000 UTC m=+161.639624305 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.341946 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.342324 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.84231007 +0000 UTC m=+161.640142318 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.369430 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-z8962" podStartSLOduration=129.369409945 podStartE2EDuration="2m9.369409945s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:01.366425716 +0000 UTC m=+161.164257964" watchObservedRunningTime="2025-11-21 13:45:01.369409945 +0000 UTC m=+161.167242183" Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.443119 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.443263 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.94322871 +0000 UTC m=+161.741060958 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.443828 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.444337 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:01.944325589 +0000 UTC m=+161.742157847 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.547750 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.548284 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.048263089 +0000 UTC m=+161.846095337 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.566665 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" podStartSLOduration=129.566598149 podStartE2EDuration="2m9.566598149s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:01.521972175 +0000 UTC m=+161.319804413" watchObservedRunningTime="2025-11-21 13:45:01.566598149 +0000 UTC m=+161.364430397" Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.579294 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" event={"ID":"6069f0ea-bc74-4858-a564-156202fbe36d","Type":"ContainerStarted","Data":"df8f297beaf499a7bc339f20476e82749790145c2fc2a29eefbad1c2aba2b0df"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.581793 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-x7zf8" event={"ID":"dc4b9d28-3c54-466b-b41d-0f1381490b02","Type":"ContainerStarted","Data":"88170af0143715676b24f984588beb8a35e381e0260f1a30fff97079a6ce0e68"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.583510 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-m4jcp" event={"ID":"a84abf7e-43b7-4483-a08e-270294215dff","Type":"ContainerStarted","Data":"3bc6a0fff0b53b681c5d524af8d3657c8d1b414a9ea4f64be07fb688fbb8de99"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.616927 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" event={"ID":"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd","Type":"ContainerStarted","Data":"f53532393f586f37f728dfd77ea01deb1e3c1cbd77638d187ff1bbabe19b6594"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.631958 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" event={"ID":"b647f6d6-6425-43f9-9e70-3a66dee6f37c","Type":"ContainerStarted","Data":"d672a64b192a40b0f86dbbf212598e013f0f0498f7f98a52d349a4f5ac3d5d04"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.650223 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.650562 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-21 13:45:02.150550615 +0000 UTC m=+161.948382863 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.653719 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m54n8" event={"ID":"d1ec861e-fbe3-412e-9885-43a9e3c5be1e","Type":"ContainerStarted","Data":"103a5c92bab38bd984c4b01da37ab5c1990891fe96f56c880c7d691de38cd0b7"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.661068 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-z98jb" event={"ID":"25198fe7-40d9-4add-871c-8f9adadddb75","Type":"ContainerStarted","Data":"c3c61bfc8410e55e3fde42b081c4e13438cb31d5ff92a0afda974e9483dbabc5"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.662832 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" event={"ID":"5686b316-695d-4eab-a619-1033b90d6d96","Type":"ContainerStarted","Data":"fddb8795aee9bec36ea51da08a295d1e92c83603270cbec97049a3989669d920"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.682359 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" event={"ID":"f7d3e23a-de5e-4562-9142-16955fe746ad","Type":"ContainerStarted","Data":"8d7d058267acdcea800a236c0178a89599c0e7ee543171fc3bb9b2532609e01b"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.705025 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" event={"ID":"9b4d3b1f-5c64-4f18-91ca-d70893516609","Type":"ContainerStarted","Data":"bfac6c65d9fb4f6957b56b65ad1b59c97019aaa419e99d041d9455cd168a0a67"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.708861 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" event={"ID":"04f53da9-c9c4-4af6-a8e1-37e91549a81a","Type":"ContainerStarted","Data":"98d93cf2c34e8a15d2d5d814f7d5d2b902064a33a769b8a2919ede6c980a705c"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.710215 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" event={"ID":"12eb63da-c25d-4d6a-84e5-070b0c9bd1aa","Type":"ContainerStarted","Data":"4be812500820919e190f149ad3a5a6cf2a98926c571878b8b6b8ea3469c9f238"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.712958 5133 generic.go:334] "Generic (PLEG): container finished" podID="e9fdda75-f32d-445e-9658-d135d0a548ae" containerID="e343495cab09c67be6faf05abf996fdb7fa745506e1e281fb3849accc55bbfae" exitCode=0 Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.713179 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" event={"ID":"e9fdda75-f32d-445e-9658-d135d0a548ae","Type":"ContainerDied","Data":"e343495cab09c67be6faf05abf996fdb7fa745506e1e281fb3849accc55bbfae"} Nov 21 13:45:01 crc kubenswrapper[5133]: 
I1121 13:45:01.717310 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-9mtt2" event={"ID":"589fc21a-acb4-4311-9e79-f9ad27c6f187","Type":"ContainerStarted","Data":"6d0164a558a19535948fb56d5f05f40a0102fa3faa561eda8cb6ff42d71f80e7"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.720530 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" event={"ID":"22c9ec1f-50e2-455a-81d3-70a1df82c3ce","Type":"ContainerStarted","Data":"4fae03800b1d52e032244fef76d781e0b298403ac0d737185ced4b3028c0f733"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.720574 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" event={"ID":"22c9ec1f-50e2-455a-81d3-70a1df82c3ce","Type":"ContainerStarted","Data":"69e80c871aae15e49bd8d25c138196e76cdf48af328054e37a4eb9b5d1a9d272"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.726721 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7jhbl" event={"ID":"bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8","Type":"ContainerStarted","Data":"c5d280b0aaabc91d3083c2ef14bc9971e16b2c1ced1bbcefe113ed2dca6c96cf"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.727246 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7jhbl" Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.732337 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" event={"ID":"576691ca-af69-46de-b8c2-5b2195e3db0b","Type":"ContainerStarted","Data":"fe17093702f056749d4743baac9c4006d4d330d98c18d0abe445fbe09ea03c89"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.738489 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" event={"ID":"dfcd7a67-a498-4c5b-82e9-c998a388b652","Type":"ContainerStarted","Data":"a43709e6c5c3516206533602e38df668585133f452edcfd4656df1ea09cc1cdc"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.741847 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" event={"ID":"f3cae218-7d6a-42c0-a269-ad8d27dfed75","Type":"ContainerStarted","Data":"c133fd0c1261f0a1e812037ae04aad1dd64e743c7602b14efa693d40c7de59ae"} Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.741867 5133 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5pm4x container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.741939 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll" Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.741947 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" podUID="e2755732-9869-46c9-be0e-e1fc77aa6644" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.751531 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.751837 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.251821584 +0000 UTC m=+162.049653832 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.852949 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.868554 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.868672 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.889870 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.389824095 +0000 UTC m=+162.187656343 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.894529 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-wdh4q" podStartSLOduration=129.8945058 podStartE2EDuration="2m9.8945058s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:01.86007769 +0000 UTC m=+161.657909938" watchObservedRunningTime="2025-11-21 13:45:01.8945058 +0000 UTC m=+161.692338048" Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.969040 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.969149 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.469132537 +0000 UTC m=+162.266964785 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:01 crc kubenswrapper[5133]: I1121 13:45:01.969506 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:01 crc kubenswrapper[5133]: E1121 13:45:01.969903 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.469893207 +0000 UTC m=+162.267725455 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.076178 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.076262 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.576228011 +0000 UTC m=+162.374060259 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.076474 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.076816 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.576807437 +0000 UTC m=+162.374639685 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.107857 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.138040 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk"] Nov 21 13:45:02 crc kubenswrapper[5133]: W1121 13:45:02.140165 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c99dec9_4fc0_4576_81e4_703a690f4f45.slice/crio-ff908d10742134432a87b2d5b5c5ea712e28869562584319c40d31d7c6b47521 WatchSource:0}: Error finding container ff908d10742134432a87b2d5b5c5ea712e28869562584319c40d31d7c6b47521: Status 404 returned error can't find the container with id ff908d10742134432a87b2d5b5c5ea712e28869562584319c40d31d7c6b47521 Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.147783 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-ddmrq"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.168218 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zd8tq"] Nov 21 13:45:02 crc kubenswrapper[5133]: W1121 13:45:02.176646 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8bbf68aa_448e_453c_8df2_839594103920.slice/crio-a8f12750df1a1069e492d316ba24f34d000032db0ad07a007d0f7c5fb4648cc2 WatchSource:0}: Error finding container a8f12750df1a1069e492d316ba24f34d000032db0ad07a007d0f7c5fb4648cc2: Status 404 returned error can't find the container with id a8f12750df1a1069e492d316ba24f34d000032db0ad07a007d0f7c5fb4648cc2 Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.177816 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.178256 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.67823534 +0000 UTC m=+162.476067588 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.215866 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh"] Nov 21 13:45:02 crc kubenswrapper[5133]: W1121 13:45:02.251484 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda06ef925_e180_4dd7_b57f_3d3017be6bd0.slice/crio-8a3bc4949e84b4553b26883ae7d286996968ba660698bc7a92ad88ef8979c79b WatchSource:0}: Error finding container 8a3bc4949e84b4553b26883ae7d286996968ba660698bc7a92ad88ef8979c79b: Status 404 returned error can't find the container with id 8a3bc4949e84b4553b26883ae7d286996968ba660698bc7a92ad88ef8979c79b Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.255126 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.274885 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6fn5b"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.279282 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.279765 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.779748396 +0000 UTC m=+162.577580634 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.381065 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.381320 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 13:45:02.881291962 +0000 UTC m=+162.679124210 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.381516 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.381871 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.881864097 +0000 UTC m=+162.679696335 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.452822 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" podStartSLOduration=129.452802735 podStartE2EDuration="2m9.452802735s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:02.44923843 +0000 UTC m=+162.247070688" watchObservedRunningTime="2025-11-21 13:45:02.452802735 +0000 UTC m=+162.250634973" Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.483298 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.483615 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:02.983599389 +0000 UTC m=+162.781431637 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.514948 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.530750 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-ntz66"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.579296 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.585082 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9zjjz"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.586405 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.586905 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.086888362 +0000 UTC m=+162.884720610 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.602106 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.609717 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.613626 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.637794 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9qx4w"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.639772 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-zfc95"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.665803 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.667379 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.687271 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.689214 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.189176818 +0000 UTC m=+162.987009206 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: W1121 13:45:02.701757 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e90fcb6_b3a1_4545_8d65_c9e13a61828b.slice/crio-c0f7219232be718015a7fa9a787ecc61082ef0272bbe66b24a18eb1b33813a10 WatchSource:0}: Error finding container c0f7219232be718015a7fa9a787ecc61082ef0272bbe66b24a18eb1b33813a10: Status 404 returned error can't find the container with id c0f7219232be718015a7fa9a787ecc61082ef0272bbe66b24a18eb1b33813a10 Nov 21 13:45:02 crc kubenswrapper[5133]: W1121 13:45:02.702454 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89d95e05_f94e_4397_9cf2_54c67401b6e2.slice/crio-956aa6d23723a653b29ac2c5a90f3dd0abda95f53dc403524c0907076f3bd1d4 WatchSource:0}: Error finding container 956aa6d23723a653b29ac2c5a90f3dd0abda95f53dc403524c0907076f3bd1d4: Status 404 returned error can't find the container with id 956aa6d23723a653b29ac2c5a90f3dd0abda95f53dc403524c0907076f3bd1d4 Nov 21 13:45:02 crc kubenswrapper[5133]: W1121 13:45:02.711222 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc9dbfec_ae22_44bf_9464_1be9c6e20833.slice/crio-c555e3e9911f0b75bb1e7701b2f4d35c37bdaface8a0230e129163fe6a446cff WatchSource:0}: Error finding container c555e3e9911f0b75bb1e7701b2f4d35c37bdaface8a0230e129163fe6a446cff: Status 404 returned error can't find the container with id c555e3e9911f0b75bb1e7701b2f4d35c37bdaface8a0230e129163fe6a446cff Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.742981 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.746320 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395530-jlnll"] Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.766315 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" event={"ID":"8ccfd073-ef60-4c22-a8f9-80c50ed1ca42","Type":"ContainerStarted","Data":"ce2017a8a2dca53ac22ec56a7189507a64df6c2d339571c526f26516c615ed49"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.798338 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" event={"ID":"12eb63da-c25d-4d6a-84e5-070b0c9bd1aa","Type":"ContainerStarted","Data":"7b366b477e88f9d3bb42973e07d468d702b10bee80c93e0bc24dd07883b3d6b6"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.799713 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.800904 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.300882456 +0000 UTC m=+163.098714724 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.804234 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-9mtt2" event={"ID":"589fc21a-acb4-4311-9e79-f9ad27c6f187","Type":"ContainerStarted","Data":"ce8c62c5dd87c8f3cab70df54b0af9d9c05fb29de3c25ca3507a792191119f95"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.816348 5133 generic.go:334] "Generic (PLEG): container finished" podID="04f53da9-c9c4-4af6-a8e1-37e91549a81a" containerID="563bc4f0fccda33e6c331dc10105daab284d62947fa6cdc0327c2caa8b043134" exitCode=0 Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.816443 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" event={"ID":"04f53da9-c9c4-4af6-a8e1-37e91549a81a","Type":"ContainerDied","Data":"563bc4f0fccda33e6c331dc10105daab284d62947fa6cdc0327c2caa8b043134"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.819862 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" event={"ID":"f49021c5-9619-47e6-a758-d800793dae08","Type":"ContainerStarted","Data":"29ae247508b79e2a22c79f58f323ca6de871ff6453269c786e34290bda41ab63"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.822083 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" event={"ID":"e7a05b22-40d7-44b3-ab76-5b1548e042ba","Type":"ContainerStarted","Data":"64a0d258434a382cb58ad9a18586e6e4e62cf2e72297e237741d41fb93190f6f"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.824264 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" event={"ID":"f1a068fb-7569-4abd-a6cc-1eef000fd386","Type":"ContainerStarted","Data":"80bbe939541dc9cbd8e25a3455b4eda47eeaeac4e5b9bfced673659355157e30"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.878294 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" event={"ID":"5686b316-695d-4eab-a619-1033b90d6d96","Type":"ContainerStarted","Data":"4d52df0454871b2f70c53172b268b32c50704c83d0a2f8cfd95524abe88cbf50"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.919537 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.919832 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.419810207 +0000 UTC m=+163.217642445 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.920058 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:02 crc kubenswrapper[5133]: E1121 13:45:02.920385 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.420378442 +0000 UTC m=+163.218210690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.962100 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" event={"ID":"9c99dec9-4fc0-4576-81e4-703a690f4f45","Type":"ContainerStarted","Data":"ff908d10742134432a87b2d5b5c5ea712e28869562584319c40d31d7c6b47521"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.982324 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" event={"ID":"576691ca-af69-46de-b8c2-5b2195e3db0b","Type":"ContainerStarted","Data":"9cca90126e3e48a5497aaeb9d3adf8f35f5ed483d40c0462513ccdb472a4d4c5"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.982826 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" event={"ID":"576691ca-af69-46de-b8c2-5b2195e3db0b","Type":"ContainerStarted","Data":"497d7d2c62d8a07080726d12ffc0d1d5e4f1245490c718ebd9cef8c0bde79047"} Nov 21 13:45:02 crc kubenswrapper[5133]: I1121 13:45:02.989275 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-m4jcp" event={"ID":"a84abf7e-43b7-4483-a08e-270294215dff","Type":"ContainerStarted","Data":"2c4e0a89375b2fcad69736a6101a492a0bbc14cfda09af011d99cd53ec808a33"} Nov 21 13:45:03 crc 
kubenswrapper[5133]: I1121 13:45:03.006957 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" event={"ID":"b647f6d6-6425-43f9-9e70-3a66dee6f37c","Type":"ContainerStarted","Data":"3466187715f57f5dcf799f06a357dba345b3e4c75106512197b30fa902430aa7"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.032221 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" event={"ID":"af22fa12-851f-4ec2-81f1-b3df1186e00c","Type":"ContainerStarted","Data":"d7f08fb427180d9a6b3a0ffa16ebe453ab2e4b5860112f2f70178a6d7d63d5d4"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.041289 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-x7zf8" event={"ID":"dc4b9d28-3c54-466b-b41d-0f1381490b02","Type":"ContainerStarted","Data":"e57655214b0e6b4d694787ee9c06e0705a588840439168a506bb2b006ac44b86"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.044347 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.046524 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.546498756 +0000 UTC m=+163.344331004 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.049775 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.050431 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.550414191 +0000 UTC m=+163.348246439 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.067351 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-4bw4h" podStartSLOduration=130.067099637 podStartE2EDuration="2m10.067099637s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.008780907 +0000 UTC m=+162.806613155" watchObservedRunningTime="2025-11-21 13:45:03.067099637 +0000 UTC m=+162.864931885" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.078927 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-qkmck" podStartSLOduration=131.078907673 podStartE2EDuration="2m11.078907673s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.028586777 +0000 UTC m=+162.826419045" watchObservedRunningTime="2025-11-21 13:45:03.078907673 +0000 UTC m=+162.876739931" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.102441 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m54n8" event={"ID":"d1ec861e-fbe3-412e-9885-43a9e3c5be1e","Type":"ContainerStarted","Data":"4063bcbf9055b4a7f0f3a87184b54bdc72b5ced57e980761037f66eb5c77c9ae"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.125605 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-7jhbl" podStartSLOduration=131.125587292 podStartE2EDuration="2m11.125587292s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.12252636 +0000 UTC m=+162.920358598" watchObservedRunningTime="2025-11-21 13:45:03.125587292 +0000 UTC m=+162.923419540" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.142599 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" event={"ID":"5c6271e9-d951-4d54-9db4-1b1df5c9d3f1","Type":"ContainerStarted","Data":"74c0995e665c395a82940a57539e2702c334a954fa19b36581e4fcaf4138c7e4"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.151624 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.154667 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.654641929 +0000 UTC m=+163.452474177 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.174949 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" event={"ID":"f3cae218-7d6a-42c0-a269-ad8d27dfed75","Type":"ContainerDied","Data":"e8f27605ade7a9de4ecbee86d278c645dc0c00a79941020d87bdfb6c9f67c6b0"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.175142 5133 generic.go:334] "Generic (PLEG): container finished" podID="f3cae218-7d6a-42c0-a269-ad8d27dfed75" containerID="e8f27605ade7a9de4ecbee86d278c645dc0c00a79941020d87bdfb6c9f67c6b0" exitCode=0 Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.186453 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" event={"ID":"9e90fcb6-b3a1-4545-8d65-c9e13a61828b","Type":"ContainerStarted","Data":"c0f7219232be718015a7fa9a787ecc61082ef0272bbe66b24a18eb1b33813a10"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.195351 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" event={"ID":"9b4d3b1f-5c64-4f18-91ca-d70893516609","Type":"ContainerStarted","Data":"1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.197221 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.210508 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8" event={"ID":"3923f6ff-3c6d-42d8-8000-91004cf58dd4","Type":"ContainerStarted","Data":"31ec114fcf5c851d976005adc1b1a173c6f7ecf68257bf272e032573141497eb"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.227676 5133 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-ggrbw container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.9:6443/healthz\": dial tcp 10.217.0.9:6443: connect: connection refused" start-of-body= Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.227735 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" podUID="9b4d3b1f-5c64-4f18-91ca-d70893516609" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.9:6443/healthz\": dial tcp 10.217.0.9:6443: connect: connection refused" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.238558 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" event={"ID":"aaaa8cb6-0a5e-4dd2-b549-f449a6e964cd","Type":"ContainerStarted","Data":"9b8401a883c805ef23fbbd386e987c6d3d32dbeddb3fe9e88259e2bd7fcf005d"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 
13:45:03.243278 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" event={"ID":"cc9dbfec-ae22-44bf-9464-1be9c6e20833","Type":"ContainerStarted","Data":"c555e3e9911f0b75bb1e7701b2f4d35c37bdaface8a0230e129163fe6a446cff"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.249676 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" event={"ID":"89d95e05-f94e-4397-9cf2-54c67401b6e2","Type":"ContainerStarted","Data":"956aa6d23723a653b29ac2c5a90f3dd0abda95f53dc403524c0907076f3bd1d4"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.251522 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" event={"ID":"63aa8ca7-9e72-4f3c-85e9-b9a423371b21","Type":"ContainerStarted","Data":"b9690de741b6da466ba6bc12b7d00406df4d6dbae49a7a5541bff1b749d86315"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.254729 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.255137 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.755121077 +0000 UTC m=+163.552953325 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.274885 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-9mtt2" podStartSLOduration=6.274866655 podStartE2EDuration="6.274866655s" podCreationTimestamp="2025-11-21 13:44:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.271808203 +0000 UTC m=+163.069640461" watchObservedRunningTime="2025-11-21 13:45:03.274866655 +0000 UTC m=+163.072698903" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.299300 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" event={"ID":"e38db8a2-e4d9-49c7-9f87-81518e834b23","Type":"ContainerStarted","Data":"e2a496ac00968ac15abc54dd9523c837861f3ab771a903d632caab22c92d8b05"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.322226 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sspkx" podStartSLOduration=130.322198341 podStartE2EDuration="2m10.322198341s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.320321861 +0000 UTC m=+163.118154119" watchObservedRunningTime="2025-11-21 13:45:03.322198341 +0000 UTC m=+163.120030589" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.355689 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.357849 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" event={"ID":"6069f0ea-bc74-4858-a564-156202fbe36d","Type":"ContainerStarted","Data":"697319c8977d89e3ab4f2bc48a72d7cdbc084f188197a19776d1c6f80b6d60ad"} Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.360286 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.860250589 +0000 UTC m=+163.658082827 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.380548 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9qx4w" event={"ID":"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2","Type":"ContainerStarted","Data":"4971c941b392701d4c8b6c35ae8174a5900ae80435d8a899f908aa6057e892b8"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.401128 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" podStartSLOduration=130.401106282 podStartE2EDuration="2m10.401106282s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.369546877 +0000 UTC m=+163.167379155" watchObservedRunningTime="2025-11-21 13:45:03.401106282 +0000 UTC m=+163.198938530" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.402707 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-m4jcp" podStartSLOduration=6.402697094 podStartE2EDuration="6.402697094s" podCreationTimestamp="2025-11-21 13:44:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.400733292 +0000 UTC m=+163.198565550" watchObservedRunningTime="2025-11-21 13:45:03.402697094 +0000 UTC m=+163.200529342" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.407386 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-z98jb" 
event={"ID":"25198fe7-40d9-4add-871c-8f9adadddb75","Type":"ContainerStarted","Data":"f133a61f9b887cc443d535207713cf1a49b053f81489f4b2d458780e4312c657"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.408494 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.412132 5133 patch_prober.go:28] interesting pod/console-operator-58897d9998-z98jb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.412188 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-z98jb" podUID="25198fe7-40d9-4add-871c-8f9adadddb75" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.439976 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" event={"ID":"8bbf68aa-448e-453c-8df2-839594103920","Type":"ContainerStarted","Data":"c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.440041 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" event={"ID":"8bbf68aa-448e-453c-8df2-839594103920","Type":"ContainerStarted","Data":"a8f12750df1a1069e492d316ba24f34d000032db0ad07a007d0f7c5fb4648cc2"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.440812 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.445503 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-m54n8" podStartSLOduration=131.445480619 podStartE2EDuration="2m11.445480619s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.438863762 +0000 UTC m=+163.236696010" watchObservedRunningTime="2025-11-21 13:45:03.445480619 +0000 UTC m=+163.243312867" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.457847 5133 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-zd8tq container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.457932 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" podUID="8bbf68aa-448e-453c-8df2-839594103920" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.460968 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.462438 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:03.962420752 +0000 UTC m=+163.760253220 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.490436 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" event={"ID":"a06ef925-e180-4dd7-b57f-3d3017be6bd0","Type":"ContainerStarted","Data":"9099ef55a1469a5fc63675bfdb560abd0125771b48123d27b46974f586158902"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.490546 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" event={"ID":"a06ef925-e180-4dd7-b57f-3d3017be6bd0","Type":"ContainerStarted","Data":"8a3bc4949e84b4553b26883ae7d286996968ba660698bc7a92ad88ef8979c79b"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.490731 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-x7zf8" podStartSLOduration=131.490697808 podStartE2EDuration="2m11.490697808s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.490082912 +0000 UTC m=+163.287915160" watchObservedRunningTime="2025-11-21 13:45:03.490697808 +0000 UTC m=+163.288530056" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.514470 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" event={"ID":"f7d3e23a-de5e-4562-9142-16955fe746ad","Type":"ContainerStarted","Data":"33676b7f46bf3a5e81326281ff4d7abeb644226e258237fab6a6683bc1ea1f9d"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.522056 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" event={"ID":"d7fb3acb-7ca2-40c1-bffa-beb98c676906","Type":"ContainerStarted","Data":"a25d45c935d3887abf4adccec9c251dbd5f7fb4783a3e5bdb031c4d7d02d587f"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.524376 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" event={"ID":"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a","Type":"ContainerStarted","Data":"1ea855e5dd6e974b61a64052535d161142ef6b2dede88e8bd2421aa3249f640f"} Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.532329 5133 patch_prober.go:28] interesting 
pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.532385 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.562648 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.564662 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.064643026 +0000 UTC m=+163.862475274 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.665165 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.669612 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.167994421 +0000 UTC m=+163.965826659 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.672632 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-z98jb" podStartSLOduration=131.672615944 podStartE2EDuration="2m11.672615944s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.665208656 +0000 UTC m=+163.463040904" watchObservedRunningTime="2025-11-21 13:45:03.672615944 +0000 UTC m=+163.470448182" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.672926 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-w26jv" podStartSLOduration=130.672922342 podStartE2EDuration="2m10.672922342s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.605954221 +0000 UTC m=+163.403786469" watchObservedRunningTime="2025-11-21 13:45:03.672922342 +0000 UTC m=+163.470754581" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.741496 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" podStartSLOduration=131.741467956 podStartE2EDuration="2m11.741467956s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.697424468 +0000 UTC m=+163.495256716" watchObservedRunningTime="2025-11-21 13:45:03.741467956 +0000 UTC m=+163.539300204" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.742456 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" podStartSLOduration=131.742446602 podStartE2EDuration="2m11.742446602s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.742332459 +0000 UTC m=+163.540164697" watchObservedRunningTime="2025-11-21 13:45:03.742446602 +0000 UTC m=+163.540278850" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.766786 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.767650 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 13:45:04.267626016 +0000 UTC m=+164.065458264 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.775521 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-9jc2h" podStartSLOduration=131.775500396 podStartE2EDuration="2m11.775500396s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.773441791 +0000 UTC m=+163.571274039" watchObservedRunningTime="2025-11-21 13:45:03.775500396 +0000 UTC m=+163.573332644" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.797339 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.797533 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.797571 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.820711 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" podStartSLOduration=130.820690135 podStartE2EDuration="2m10.820690135s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.811194471 +0000 UTC m=+163.609026719" watchObservedRunningTime="2025-11-21 13:45:03.820690135 +0000 UTC m=+163.618522383" Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.868853 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.869225 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.369213463 +0000 UTC m=+164.167045711 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:03 crc kubenswrapper[5133]: I1121 13:45:03.970411 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:03 crc kubenswrapper[5133]: E1121 13:45:03.970891 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.470872623 +0000 UTC m=+164.268704871 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.072110 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.072541 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.572526202 +0000 UTC m=+164.370358450 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.175053 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.175389 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.675358582 +0000 UTC m=+164.473190830 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.175460 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.175846 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.675824655 +0000 UTC m=+164.473656903 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.277767 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.278172 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.778154222 +0000 UTC m=+164.575986470 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.380723 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.381408 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.881393354 +0000 UTC m=+164.679225602 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.467793 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0da7ec78-a448-44eb-b829-20cf97a0bbcf" path="/var/lib/kubelet/pods/0da7ec78-a448-44eb-b829-20cf97a0bbcf/volumes" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.487404 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.487925 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:04.987882172 +0000 UTC m=+164.785714420 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.566326 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" event={"ID":"cc9dbfec-ae22-44bf-9464-1be9c6e20833","Type":"ContainerStarted","Data":"42ce95a1dc724bd365415fa6a81a1cd4eae5c10f5eb418216df419959f6d41a8"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.576885 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.576918 5133 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-sjflw container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" start-of-body= Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.577067 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" podUID="cc9dbfec-ae22-44bf-9464-1be9c6e20833" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.38:8443/healthz\": dial tcp 10.217.0.38:8443: connect: connection refused" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.589486 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.590662 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.090633651 +0000 UTC m=+164.888465899 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.638237 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" podStartSLOduration=131.638210154 podStartE2EDuration="2m11.638210154s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:04.636035935 +0000 UTC m=+164.433868183" watchObservedRunningTime="2025-11-21 13:45:04.638210154 +0000 UTC m=+164.436042412" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.642563 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" event={"ID":"12eb63da-c25d-4d6a-84e5-070b0c9bd1aa","Type":"ContainerStarted","Data":"f0c40780efd7cb8d43e2a3b020615c9af1c325e0b276b73a4ba2e458a40df5b5"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.660962 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" podStartSLOduration=132.660935721 podStartE2EDuration="2m12.660935721s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:03.838634255 +0000 UTC m=+163.636466503" watchObservedRunningTime="2025-11-21 13:45:04.660935721 +0000 UTC m=+164.458767969" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.669585 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" event={"ID":"9c99dec9-4fc0-4576-81e4-703a690f4f45","Type":"ContainerStarted","Data":"11b6025d62e473dcdc2564cdeb13c85a4238c5023f84ea336307661cdb540a39"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.669658 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" event={"ID":"9c99dec9-4fc0-4576-81e4-703a690f4f45","Type":"ContainerStarted","Data":"59435c259bbdd7432029296b11f64ffe9fa7c87173826c4f049330c928999459"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.690889 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.692712 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.192693471 +0000 UTC m=+164.990525719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.700128 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9qx4w" event={"ID":"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2","Type":"ContainerStarted","Data":"8e93c6e38c33e2e1a72256c077773f50b3a7ed3bf71e0637968c39edaea038bb"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.716536 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" event={"ID":"f49021c5-9619-47e6-a758-d800793dae08","Type":"ContainerStarted","Data":"4f923245143c71070abd336a7b35030a1eb5c924cb22048e9962ed1bf370ca01"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.729060 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" event={"ID":"89d95e05-f94e-4397-9cf2-54c67401b6e2","Type":"ContainerStarted","Data":"f6e9f77a921381f543b3dffd87e70f390ba1e026d5667fad89b518d20128b7ba"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.732362 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" event={"ID":"8ccfd073-ef60-4c22-a8f9-80c50ed1ca42","Type":"ContainerStarted","Data":"235cc7bc8444582277043661ab2900f436dd257e8e8ed0cfd92fae0314c4039f"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.732438 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" event={"ID":"8ccfd073-ef60-4c22-a8f9-80c50ed1ca42","Type":"ContainerStarted","Data":"1d6357c0d2c3b6ee0a5e98841f8b4b7fa644d7c553fdb11cd13e535f61d362b7"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.744290 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-jpq5h" podStartSLOduration=132.74425548 podStartE2EDuration="2m12.74425548s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:04.743290134 +0000 UTC m=+164.541122382" watchObservedRunningTime="2025-11-21 13:45:04.74425548 +0000 UTC m=+164.542087728" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.750656 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.751165 5133 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.755028 5133 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-72h2c container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.14:8443/livez\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.755113 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" podUID="e9fdda75-f32d-445e-9658-d135d0a548ae" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.14:8443/livez\": dial tcp 10.217.0.14:8443: connect: connection refused" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.774357 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-p6f2d" event={"ID":"6069f0ea-bc74-4858-a564-156202fbe36d","Type":"ContainerStarted","Data":"751c1965b9ea278760935ac255479a2d30e71a8634ea2128b581d4eab7a80d53"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.794180 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-65hwm" event={"ID":"f7d3e23a-de5e-4562-9142-16955fe746ad","Type":"ContainerStarted","Data":"4dbe9d52cdc6795eb6cacf3aa1c1fd470401e8daf9290ef06534541193d92441"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.796355 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.804365 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.304336887 +0000 UTC m=+165.102169135 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.805203 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:04 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:04 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:04 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.805284 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.826383 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" event={"ID":"e38db8a2-e4d9-49c7-9f87-81518e834b23","Type":"ContainerStarted","Data":"e5231dca9059461bd5858d3171569a04e4512a22c8453ba30a3eba4583990d0c"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.827087 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.829047 5133 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-p929b container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:5443/healthz\": dial tcp 10.217.0.25:5443: connect: connection refused" start-of-body= Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.829141 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" podUID="e38db8a2-e4d9-49c7-9f87-81518e834b23" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.25:5443/healthz\": dial tcp 10.217.0.25:5443: connect: connection refused" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.840775 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" event={"ID":"f1a068fb-7569-4abd-a6cc-1eef000fd386","Type":"ContainerStarted","Data":"ace514eee3d91e83f4fc4b33d8a282d5bacda402a51a609519eaba297fb393d5"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.845123 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.848158 5133 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-f9kbn container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 
13:45:04.848213 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" podUID="f1a068fb-7569-4abd-a6cc-1eef000fd386" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.852061 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-ddmrq" podStartSLOduration=131.852037133 podStartE2EDuration="2m11.852037133s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:04.848568971 +0000 UTC m=+164.646401219" watchObservedRunningTime="2025-11-21 13:45:04.852037133 +0000 UTC m=+164.649869381" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.860306 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" event={"ID":"4b5e9384-1ed0-4b0f-8a0a-ea288a701e4a","Type":"ContainerStarted","Data":"37bebef950c06f34a8ead395b8f2bde9851088eaf383792d0525b531826eafda"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.894062 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" event={"ID":"5c6271e9-d951-4d54-9db4-1b1df5c9d3f1","Type":"ContainerStarted","Data":"91ae5673d9b37594a2bf637098085c25899581d3b0eba5cbdf3dec4961f39c55"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.896854 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:04 crc kubenswrapper[5133]: E1121 13:45:04.898243 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.398227349 +0000 UTC m=+165.196059597 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.903845 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" event={"ID":"f3cae218-7d6a-42c0-a269-ad8d27dfed75","Type":"ContainerStarted","Data":"e39367c4e7f34771c9e4c8e04de9ddd17db4b74e58224aac1b03c26bad8b7bd4"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.904748 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.907617 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" event={"ID":"d7fb3acb-7ca2-40c1-bffa-beb98c676906","Type":"ContainerStarted","Data":"d5dfd5aa4462ed1674868b1d43576bbea5663b7bec62fa42f75a5e45f7a887ad"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.912226 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" event={"ID":"04f53da9-c9c4-4af6-a8e1-37e91549a81a","Type":"ContainerStarted","Data":"1aa062b9ec8988601e57412b9dc2d037e82f71273a599404c705a15e4d8d7f48"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.913479 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" event={"ID":"e7a05b22-40d7-44b3-ab76-5b1548e042ba","Type":"ContainerStarted","Data":"90031fa895cb2fd1014aef034858413acb5691908b73c56eb5bc5e1cafc034c5"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.915676 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" event={"ID":"e9fdda75-f32d-445e-9658-d135d0a548ae","Type":"ContainerStarted","Data":"503f5437dbfba17e040cc6d1d54dfa4ac626a00a3c76f3cfb326fd13fca28e82"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.918869 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" event={"ID":"a06ef925-e180-4dd7-b57f-3d3017be6bd0","Type":"ContainerStarted","Data":"d21f1d387f50b4ac8cb589e4f3a25865474093eb5281d6e2e4ff52a2534364ba"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.920699 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.929718 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-lz2px" podStartSLOduration=131.92968587 podStartE2EDuration="2m11.92968587s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:04.920405712 +0000 UTC m=+164.718237960" watchObservedRunningTime="2025-11-21 13:45:04.92968587 +0000 UTC m=+164.727518118" Nov 21 13:45:04 
crc kubenswrapper[5133]: I1121 13:45:04.932202 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" event={"ID":"9e90fcb6-b3a1-4545-8d65-c9e13a61828b","Type":"ContainerStarted","Data":"920f0120f80345e233a31a9b0329b809a08c8bfb8fad057f26c239b94648a858"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.942243 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" event={"ID":"af22fa12-851f-4ec2-81f1-b3df1186e00c","Type":"ContainerStarted","Data":"d20c2728dcc96c4cd1306e3a9967d3012671b156ddf3797e7e37755333282238"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.953518 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8" event={"ID":"3923f6ff-3c6d-42d8-8000-91004cf58dd4","Type":"ContainerStarted","Data":"fbebb9c117a34023837bb2cb721638c051fbf0b3827e1c09156cabe38160ace0"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.953605 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8" event={"ID":"3923f6ff-3c6d-42d8-8000-91004cf58dd4","Type":"ContainerStarted","Data":"d0afb167e9f86de327a91dc8a4f6580d4fb6b75072ef93a8ad95e3ca76bcd93c"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.977416 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" event={"ID":"b647f6d6-6425-43f9-9e70-3a66dee6f37c","Type":"ContainerStarted","Data":"2d62c51210cb898ca709a5ab6dd9b32895fef49129321b8a58607f31a2c53331"} Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.981645 5133 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-zd8tq container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.981706 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" podUID="8bbf68aa-448e-453c-8df2-839594103920" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.994467 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:45:04 crc kubenswrapper[5133]: I1121 13:45:04.999345 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.003084 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.503063833 +0000 UTC m=+165.300896081 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.027813 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-zfc95" podStartSLOduration=132.027773434 podStartE2EDuration="2m12.027773434s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.024056695 +0000 UTC m=+164.821888943" watchObservedRunningTime="2025-11-21 13:45:05.027773434 +0000 UTC m=+164.825605682" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.100373 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-6fn5b" podStartSLOduration=133.100337484 podStartE2EDuration="2m13.100337484s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.097363745 +0000 UTC m=+164.895196003" watchObservedRunningTime="2025-11-21 13:45:05.100337484 +0000 UTC m=+164.898169732" Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.105414 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.605388529 +0000 UTC m=+165.403220777 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.105275 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.106429 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.119539 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.619517957 +0000 UTC m=+165.417350395 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.180334 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" podStartSLOduration=132.180315004 podStartE2EDuration="2m12.180315004s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.165926659 +0000 UTC m=+164.963758917" watchObservedRunningTime="2025-11-21 13:45:05.180315004 +0000 UTC m=+164.978147252" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.209701 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.210418 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.710396768 +0000 UTC m=+165.508229016 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.249729 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" podStartSLOduration=132.24970208 podStartE2EDuration="2m12.24970208s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.248337843 +0000 UTC m=+165.046170101" watchObservedRunningTime="2025-11-21 13:45:05.24970208 +0000 UTC m=+165.047534328" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.296779 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-j9kwz" podStartSLOduration=132.296743538 podStartE2EDuration="2m12.296743538s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.29382383 +0000 UTC m=+165.091656078" watchObservedRunningTime="2025-11-21 13:45:05.296743538 +0000 UTC m=+165.094575786" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.312695 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.313297 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.81327285 +0000 UTC m=+165.611105098 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.414157 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.414294 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.914268352 +0000 UTC m=+165.712100600 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.414668 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.415195 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:05.915172076 +0000 UTC m=+165.713004324 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.436201 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" podStartSLOduration=133.436171618 podStartE2EDuration="2m13.436171618s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.426021656 +0000 UTC m=+165.223853894" watchObservedRunningTime="2025-11-21 13:45:05.436171618 +0000 UTC m=+165.234003856" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.436763 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-dvnq8" podStartSLOduration=132.436759163 podStartE2EDuration="2m12.436759163s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.358238793 +0000 UTC m=+165.156071051" watchObservedRunningTime="2025-11-21 13:45:05.436759163 +0000 UTC m=+165.234591411" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.463314 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-mghzk" podStartSLOduration=132.463295263 podStartE2EDuration="2m12.463295263s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.459864551 +0000 UTC m=+165.257696799" watchObservedRunningTime="2025-11-21 13:45:05.463295263 +0000 UTC m=+165.261127511" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.486669 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" podStartSLOduration=132.486643968 podStartE2EDuration="2m12.486643968s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.485663532 +0000 UTC m=+165.283495780" watchObservedRunningTime="2025-11-21 13:45:05.486643968 +0000 UTC m=+165.284476216" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.515829 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.516256 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 13:45:06.016232199 +0000 UTC m=+165.814064437 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.541827 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-z98jb" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.618289 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.618723 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.118705251 +0000 UTC m=+165.916537489 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.720095 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.720362 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.220331549 +0000 UTC m=+166.018163797 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.720499 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.720952 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.220930995 +0000 UTC m=+166.018763243 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.740055 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-q7g4h" podStartSLOduration=132.740035366 podStartE2EDuration="2m12.740035366s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.657348724 +0000 UTC m=+165.455180972" watchObservedRunningTime="2025-11-21 13:45:05.740035366 +0000 UTC m=+165.537867614" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.741036 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" podStartSLOduration=133.741030773 podStartE2EDuration="2m13.741030773s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.74017264 +0000 UTC m=+165.538004888" watchObservedRunningTime="2025-11-21 13:45:05.741030773 +0000 UTC m=+165.538863011" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.798862 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:05 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:05 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:05 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.798969 5133 prober.go:107] "Probe 
failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.821411 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.821624 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.321594028 +0000 UTC m=+166.119426276 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.821710 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.822053 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.32204547 +0000 UTC m=+166.119877718 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.853473 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mp878" podStartSLOduration=132.85345074 podStartE2EDuration="2m12.85345074s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:05.852758271 +0000 UTC m=+165.650590529" watchObservedRunningTime="2025-11-21 13:45:05.85345074 +0000 UTC m=+165.651282988" Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.922639 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.922882 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.422842006 +0000 UTC m=+166.220674254 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.922966 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:05 crc kubenswrapper[5133]: E1121 13:45:05.923466 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.423454392 +0000 UTC m=+166.221286640 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:05 crc kubenswrapper[5133]: I1121 13:45:05.992685 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" event={"ID":"63aa8ca7-9e72-4f3c-85e9-b9a423371b21","Type":"ContainerStarted","Data":"79c620253bdea52df017064d683ca7116465637ad3ba9f2d0376b412eed1e451"} Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.004235 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-ntz66" event={"ID":"5c6271e9-d951-4d54-9db4-1b1df5c9d3f1","Type":"ContainerStarted","Data":"c088104abe47bb74ed78f5003dcb9782167c969f2c6e13a7a299bde42b1b4c8b"} Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.010809 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9qx4w" event={"ID":"5ff7be36-18ee-41ff-b8b6-4ea13387d7f2","Type":"ContainerStarted","Data":"12fb94935a9c9adea4fdbf1a9e7f21035cd27933f47150bb160bff7e4abdfe9b"} Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.010952 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.023710 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.023889 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.523864788 +0000 UTC m=+166.321697026 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.024049 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.024494 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" event={"ID":"04f53da9-c9c4-4af6-a8e1-37e91549a81a","Type":"ContainerStarted","Data":"0e1a2589adc60667dbd9ea26e9fefe53b9d8f9dfa67c74b53818740748039d3f"} Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.024758 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.524748672 +0000 UTC m=+166.322580920 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.025822 5133 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-zd8tq container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.025878 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" podUID="8bbf68aa-448e-453c-8df2-839594103920" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.042914 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f9kbn" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.049281 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" podStartSLOduration=6.049258288 podStartE2EDuration="6.049258288s" podCreationTimestamp="2025-11-21 13:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:06.045584429 +0000 UTC m=+165.843416677" watchObservedRunningTime="2025-11-21 
13:45:06.049258288 +0000 UTC m=+165.847090536" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.062439 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-sjflw" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.125602 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.125808 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.625771224 +0000 UTC m=+166.423603472 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.126721 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.130891 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.630869421 +0000 UTC m=+166.428701729 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.232018 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-7jqbx" podStartSLOduration=133.231970325 podStartE2EDuration="2m13.231970325s" podCreationTimestamp="2025-11-21 13:42:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:06.203562875 +0000 UTC m=+166.001395133" watchObservedRunningTime="2025-11-21 13:45:06.231970325 +0000 UTC m=+166.029802573" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.239235 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.239459 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.739425185 +0000 UTC m=+166.537257443 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.239551 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.239987 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.739968739 +0000 UTC m=+166.537800977 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.342620 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.343172 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.843154779 +0000 UTC m=+166.640987027 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.360678 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-9qx4w" podStartSLOduration=9.360661498 podStartE2EDuration="9.360661498s" podCreationTimestamp="2025-11-21 13:44:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:06.357609476 +0000 UTC m=+166.155441724" watchObservedRunningTime="2025-11-21 13:45:06.360661498 +0000 UTC m=+166.158493746" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.444493 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.444944 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:06.944925422 +0000 UTC m=+166.742757670 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.503088 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" podStartSLOduration=134.503070767 podStartE2EDuration="2m14.503070767s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:06.502642765 +0000 UTC m=+166.300475023" watchObservedRunningTime="2025-11-21 13:45:06.503070767 +0000 UTC m=+166.300903015" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.546847 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.547284 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.047261719 +0000 UTC m=+166.845093967 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.648091 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.648664 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.148639031 +0000 UTC m=+166.946471279 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.749721 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.749937 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.249896129 +0000 UTC m=+167.047728377 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.750511 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.751053 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.25104359 +0000 UTC m=+167.048875838 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.799078 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:06 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:06 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:06 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.799205 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.852090 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.852327 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.352298769 +0000 UTC m=+167.150131017 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.852550 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.852940 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.352930475 +0000 UTC m=+167.150762723 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:06 crc kubenswrapper[5133]: I1121 13:45:06.953581 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:06 crc kubenswrapper[5133]: E1121 13:45:06.954194 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.454157763 +0000 UTC m=+167.251990011 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.026343 5133 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-p929b container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.25:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.026405 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" podUID="e38db8a2-e4d9-49c7-9f87-81518e834b23" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.25:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.038306 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9rc8w" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.055703 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.056114 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-21 13:45:07.55609639 +0000 UTC m=+167.353928638 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.157414 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.157648 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.657613266 +0000 UTC m=+167.455445514 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.158594 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.160732 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.660716779 +0000 UTC m=+167.458549027 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.260785 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.260975 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.76094174 +0000 UTC m=+167.558773988 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.261253 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.261673 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.761656489 +0000 UTC m=+167.559488737 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.362625 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.362881 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.862840215 +0000 UTC m=+167.660672463 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.363892 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.364326 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.864307105 +0000 UTC m=+167.662139353 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.465790 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.466164 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:07.966148919 +0000 UTC m=+167.763981157 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.567174 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.567511 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:08.06749595 +0000 UTC m=+167.865328188 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.650043 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rfbh2"] Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.651219 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.653867 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.668345 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.668643 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:08.168628035 +0000 UTC m=+167.966460283 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.676774 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rfbh2"] Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.711963 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-p929b" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.770042 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-catalog-content\") pod \"community-operators-rfbh2\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.770087 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ffjm\" (UniqueName: \"kubernetes.io/projected/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-kube-api-access-5ffjm\") pod \"community-operators-rfbh2\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.770123 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.770170 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-utilities\") pod \"community-operators-rfbh2\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " 
pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.770475 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:08.270462159 +0000 UTC m=+168.068294407 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.798834 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:07 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:07 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:07 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.798904 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.852818 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xp47m"] Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.853940 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.855882 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.870942 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.871470 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-catalog-content\") pod \"community-operators-rfbh2\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.871599 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ffjm\" (UniqueName: \"kubernetes.io/projected/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-kube-api-access-5ffjm\") pod \"community-operators-rfbh2\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.871740 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-utilities\") pod \"community-operators-rfbh2\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.872691 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-utilities\") pod \"community-operators-rfbh2\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.872869 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:08.372847698 +0000 UTC m=+168.170679956 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.873261 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-catalog-content\") pod \"community-operators-rfbh2\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.876985 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xp47m"] Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.910735 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ffjm\" (UniqueName: \"kubernetes.io/projected/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-kube-api-access-5ffjm\") pod \"community-operators-rfbh2\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.967549 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.972789 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-utilities\") pod \"certified-operators-xp47m\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.972941 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-catalog-content\") pod \"certified-operators-xp47m\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.973080 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:07 crc kubenswrapper[5133]: I1121 13:45:07.973200 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glkqz\" (UniqueName: \"kubernetes.io/projected/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-kube-api-access-glkqz\") pod \"certified-operators-xp47m\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:07 crc kubenswrapper[5133]: E1121 13:45:07.973598 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-21 13:45:08.473584693 +0000 UTC m=+168.271416941 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.039399 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-w6r59"] Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.041468 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.059617 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" event={"ID":"63aa8ca7-9e72-4f3c-85e9-b9a423371b21","Type":"ContainerStarted","Data":"48932390415fdb349ab394d9b31705ee36a61f360ac4c368cbff91da8cb22755"} Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.059706 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w6r59"] Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.077989 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.078669 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glkqz\" (UniqueName: \"kubernetes.io/projected/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-kube-api-access-glkqz\") pod \"certified-operators-xp47m\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.078743 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-utilities\") pod \"certified-operators-xp47m\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.078787 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-catalog-content\") pod \"certified-operators-xp47m\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.078814 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:08.578782237 +0000 UTC m=+168.376614485 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.078881 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.079466 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-catalog-content\") pod \"certified-operators-xp47m\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.079530 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:08.579515907 +0000 UTC m=+168.377348155 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.080112 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-utilities\") pod \"certified-operators-xp47m\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.125951 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glkqz\" (UniqueName: \"kubernetes.io/projected/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-kube-api-access-glkqz\") pod \"certified-operators-xp47m\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.174687 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.181574 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.181817 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-utilities\") pod \"community-operators-w6r59\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.181849 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-catalog-content\") pod \"community-operators-w6r59\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.181908 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkw7w\" (UniqueName: \"kubernetes.io/projected/297f6c8b-efd5-4df1-901e-d66882a776c5-kube-api-access-zkw7w\") pod \"community-operators-w6r59\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.182642 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:08.682623495 +0000 UTC m=+168.480455743 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.238137 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-b9rk5"] Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.242506 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.283513 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-utilities\") pod \"community-operators-w6r59\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.283551 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-catalog-content\") pod \"community-operators-w6r59\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.283583 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkw7w\" (UniqueName: \"kubernetes.io/projected/297f6c8b-efd5-4df1-901e-d66882a776c5-kube-api-access-zkw7w\") pod \"community-operators-w6r59\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.283622 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.283926 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:08.783913164 +0000 UTC m=+168.581745412 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.284158 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-catalog-content\") pod \"community-operators-w6r59\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.284386 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-utilities\") pod \"community-operators-w6r59\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.296868 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b9rk5"] Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.306706 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkw7w\" (UniqueName: \"kubernetes.io/projected/297f6c8b-efd5-4df1-901e-d66882a776c5-kube-api-access-zkw7w\") pod \"community-operators-w6r59\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.327935 5133 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.385309 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.385726 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-utilities\") pod \"certified-operators-b9rk5\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.385774 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-catalog-content\") pod \"certified-operators-b9rk5\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.385868 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-21 13:45:08.88582788 +0000 UTC m=+168.683660148 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.386016 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jjqm\" (UniqueName: \"kubernetes.io/projected/94664536-aa38-4b5b-b7b2-fa28904da365-kube-api-access-4jjqm\") pod \"certified-operators-b9rk5\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.417069 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.423866 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rfbh2"] Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.488701 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.489166 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:08.989149934 +0000 UTC m=+168.786982182 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.489288 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-utilities\") pod \"certified-operators-b9rk5\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.489362 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-catalog-content\") pod \"certified-operators-b9rk5\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.489398 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jjqm\" (UniqueName: \"kubernetes.io/projected/94664536-aa38-4b5b-b7b2-fa28904da365-kube-api-access-4jjqm\") pod \"certified-operators-b9rk5\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.489724 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-utilities\") pod \"certified-operators-b9rk5\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.490065 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-catalog-content\") pod \"certified-operators-b9rk5\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.503467 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xp47m"] Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.515951 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jjqm\" (UniqueName: \"kubernetes.io/projected/94664536-aa38-4b5b-b7b2-fa28904da365-kube-api-access-4jjqm\") pod \"certified-operators-b9rk5\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.583088 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.596431 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.596690 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.096673421 +0000 UTC m=+168.894505669 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.596743 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.597055 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.097048711 +0000 UTC m=+168.894880959 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.675444 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w6r59"] Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.697817 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.197777854 +0000 UTC m=+168.995610112 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.697620 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.698986 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.699425 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.199414348 +0000 UTC m=+168.997246596 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.800919 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.801120 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.301084998 +0000 UTC m=+169.098917246 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.801437 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.801950 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.30193065 +0000 UTC m=+169.099762898 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.806841 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:08 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:08 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:08 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.806968 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.847575 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b9rk5"] Nov 21 13:45:08 crc kubenswrapper[5133]: I1121 13:45:08.902990 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:08 crc kubenswrapper[5133]: E1121 13:45:08.903333 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.403318113 +0000 UTC m=+169.201150361 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.007851 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:09 crc kubenswrapper[5133]: E1121 13:45:09.008259 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.508242669 +0000 UTC m=+169.306074917 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.061456 5133 generic.go:334] "Generic (PLEG): container finished" podID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerID="6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6" exitCode=0 Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.061564 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp47m" event={"ID":"ce3b64c0-a2cb-416d-9c9a-14c10514bfac","Type":"ContainerDied","Data":"6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6"} Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.061597 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp47m" event={"ID":"ce3b64c0-a2cb-416d-9c9a-14c10514bfac","Type":"ContainerStarted","Data":"e4b1bfce0f7ba5881296f773324ad77e0b8f7005f67b091174a3aa1019e7eec8"} Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.063052 5133 generic.go:334] "Generic (PLEG): container finished" podID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerID="79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8" exitCode=0 Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.063111 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6r59" event={"ID":"297f6c8b-efd5-4df1-901e-d66882a776c5","Type":"ContainerDied","Data":"79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8"} Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.063139 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6r59" event={"ID":"297f6c8b-efd5-4df1-901e-d66882a776c5","Type":"ContainerStarted","Data":"16b5c9b8939d799676b296e1a3be23df28e5ec38b08a13bdc59123692dc8587b"} Nov 21 13:45:09 crc 
kubenswrapper[5133]: I1121 13:45:09.063283 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.065256 5133 generic.go:334] "Generic (PLEG): container finished" podID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerID="cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61" exitCode=0 Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.065386 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfbh2" event={"ID":"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e","Type":"ContainerDied","Data":"cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61"} Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.065476 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfbh2" event={"ID":"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e","Type":"ContainerStarted","Data":"1e7ad4a4201a2c990212b92cd302903baedf0fd343876175bbd45218bbb30012"} Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.070951 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" event={"ID":"63aa8ca7-9e72-4f3c-85e9-b9a423371b21","Type":"ContainerStarted","Data":"676aff00b569bdd0ead24168a6e4b4e4d6e054ce18aedf374ace4874ed8931cb"} Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.071038 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" event={"ID":"63aa8ca7-9e72-4f3c-85e9-b9a423371b21","Type":"ContainerStarted","Data":"c12dc3652478b7e2d37e3dfedbfe0f19613af01af50f1cafcea10b6c26c8308b"} Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.072989 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b9rk5" event={"ID":"94664536-aa38-4b5b-b7b2-fa28904da365","Type":"ContainerStarted","Data":"760cdb55798d0e8262649674a27295c294922e72f7f4622436ee349b1f4f917c"} Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.109030 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:09 crc kubenswrapper[5133]: E1121 13:45:09.109224 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.60919619 +0000 UTC m=+169.407028438 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.109302 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:09 crc kubenswrapper[5133]: E1121 13:45:09.109698 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-21 13:45:09.609690423 +0000 UTC m=+169.407522671 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-5rjt4" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.120583 5133 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-21T13:45:08.327959702Z","Handler":null,"Name":""} Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.124633 5133 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.124671 5133 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.124744 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" podStartSLOduration=12.124722375 podStartE2EDuration="12.124722375s" podCreationTimestamp="2025-11-21 13:44:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:09.124088668 +0000 UTC m=+168.921920926" watchObservedRunningTime="2025-11-21 13:45:09.124722375 +0000 UTC m=+168.922554633" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.210215 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.214562 5133 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.314633 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.320035 5133 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.320231 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.386495 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-5rjt4\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.477523 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.671862 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pm4gn"] Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.674903 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.681498 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.685113 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.686168 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.690932 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pm4gn"] Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.692809 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.692821 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.693533 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.694882 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.697331 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.697378 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.697863 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.697889 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.753195 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5rjt4"] Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.754330 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.760586 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-72h2c" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.802369 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:09 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:09 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:09 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:09 crc 
kubenswrapper[5133]: I1121 13:45:09.802746 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.824858 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-utilities\") pod \"redhat-marketplace-pm4gn\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.824915 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c945m\" (UniqueName: \"kubernetes.io/projected/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-kube-api-access-c945m\") pod \"redhat-marketplace-pm4gn\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.824987 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c33695a-1523-448d-990b-3b16e8902b8a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2c33695a-1523-448d-990b-3b16e8902b8a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.825072 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-catalog-content\") pod \"redhat-marketplace-pm4gn\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.825093 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c33695a-1523-448d-990b-3b16e8902b8a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2c33695a-1523-448d-990b-3b16e8902b8a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.927700 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-utilities\") pod \"redhat-marketplace-pm4gn\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.927774 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c945m\" (UniqueName: \"kubernetes.io/projected/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-kube-api-access-c945m\") pod \"redhat-marketplace-pm4gn\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.927810 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c33695a-1523-448d-990b-3b16e8902b8a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2c33695a-1523-448d-990b-3b16e8902b8a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 
21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.927860 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-catalog-content\") pod \"redhat-marketplace-pm4gn\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.927880 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c33695a-1523-448d-990b-3b16e8902b8a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2c33695a-1523-448d-990b-3b16e8902b8a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.928651 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c33695a-1523-448d-990b-3b16e8902b8a-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2c33695a-1523-448d-990b-3b16e8902b8a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.929407 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-utilities\") pod \"redhat-marketplace-pm4gn\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.929467 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-catalog-content\") pod \"redhat-marketplace-pm4gn\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.944811 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.945125 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.965138 5133 patch_prober.go:28] interesting pod/apiserver-76f77b778f-dn6hh container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]log ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]etcd ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/generic-apiserver-start-informers ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/max-in-flight-filter ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 21 13:45:09 crc kubenswrapper[5133]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/project.openshift.io-projectcache ok Nov 21 13:45:09 crc 
kubenswrapper[5133]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/openshift.io-startinformers ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 21 13:45:09 crc kubenswrapper[5133]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 21 13:45:09 crc kubenswrapper[5133]: livez check failed Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.966085 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" podUID="04f53da9-c9c4-4af6-a8e1-37e91549a81a" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.973023 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c33695a-1523-448d-990b-3b16e8902b8a-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2c33695a-1523-448d-990b-3b16e8902b8a\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 13:45:09 crc kubenswrapper[5133]: I1121 13:45:09.976639 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c945m\" (UniqueName: \"kubernetes.io/projected/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-kube-api-access-c945m\") pod \"redhat-marketplace-pm4gn\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.011992 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.030573 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.032974 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bbj2q"] Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.034193 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.043586 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbj2q"] Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.061911 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.062412 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.063925 5133 patch_prober.go:28] interesting pod/console-f9d7485db-m54n8 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.063984 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-m54n8" podUID="d1ec861e-fbe3-412e-9885-43a9e3c5be1e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.109854 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" event={"ID":"fc3cef5b-e125-43c6-be9e-52ae6617e01a","Type":"ContainerStarted","Data":"41a42c248eacc6cd87aa16ede50efbc988270bf60b3c42307b41f37dd4248165"} Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.109903 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" event={"ID":"fc3cef5b-e125-43c6-be9e-52ae6617e01a","Type":"ContainerStarted","Data":"e1d194fe5caa27b7fe5b709029eac21b433162fbd46fe8ad73f72ddcd60d0dd1"} Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.111039 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.114317 5133 generic.go:334] "Generic (PLEG): container finished" podID="94664536-aa38-4b5b-b7b2-fa28904da365" containerID="e0c05cab8dbd1aafc602ade374a9763f608d0b7ed35ebbab2220a8ae06acf2b8" exitCode=0 Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.115449 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b9rk5" event={"ID":"94664536-aa38-4b5b-b7b2-fa28904da365","Type":"ContainerDied","Data":"e0c05cab8dbd1aafc602ade374a9763f608d0b7ed35ebbab2220a8ae06acf2b8"} Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.131554 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-catalog-content\") pod \"redhat-marketplace-bbj2q\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.131624 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjwfx\" (UniqueName: \"kubernetes.io/projected/c292c11b-bba4-40c5-a591-ba3f896e610e-kube-api-access-bjwfx\") pod \"redhat-marketplace-bbj2q\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " 
pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.131721 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-utilities\") pod \"redhat-marketplace-bbj2q\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.148453 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" podStartSLOduration=138.148422449 podStartE2EDuration="2m18.148422449s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:10.139846949 +0000 UTC m=+169.937679207" watchObservedRunningTime="2025-11-21 13:45:10.148422449 +0000 UTC m=+169.946254697" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.238838 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-catalog-content\") pod \"redhat-marketplace-bbj2q\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.248756 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjwfx\" (UniqueName: \"kubernetes.io/projected/c292c11b-bba4-40c5-a591-ba3f896e610e-kube-api-access-bjwfx\") pod \"redhat-marketplace-bbj2q\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.248918 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-utilities\") pod \"redhat-marketplace-bbj2q\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.248769 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-catalog-content\") pod \"redhat-marketplace-bbj2q\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.249389 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-utilities\") pod \"redhat-marketplace-bbj2q\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.301519 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjwfx\" (UniqueName: \"kubernetes.io/projected/c292c11b-bba4-40c5-a591-ba3f896e610e-kube-api-access-bjwfx\") pod \"redhat-marketplace-bbj2q\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.367367 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.448381 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pm4gn"] Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.452281 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.469065 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.599936 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 21 13:45:10 crc kubenswrapper[5133]: W1121 13:45:10.653143 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod2c33695a_1523_448d_990b_3b16e8902b8a.slice/crio-90879c3532dbda9d703319cda1c4f54b24d6cd9009c40b0baf533954456d3dbf WatchSource:0}: Error finding container 90879c3532dbda9d703319cda1c4f54b24d6cd9009c40b0baf533954456d3dbf: Status 404 returned error can't find the container with id 90879c3532dbda9d703319cda1c4f54b24d6cd9009c40b0baf533954456d3dbf Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.685934 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbj2q"] Nov 21 13:45:10 crc kubenswrapper[5133]: W1121 13:45:10.704679 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc292c11b_bba4_40c5_a591_ba3f896e610e.slice/crio-00fa097bbe8f78733c1ecc7aaca32c34da2e29dc9ca45f7e2cb2f5c28ef3ff5f WatchSource:0}: Error finding container 00fa097bbe8f78733c1ecc7aaca32c34da2e29dc9ca45f7e2cb2f5c28ef3ff5f: Status 404 returned error can't find the container with id 00fa097bbe8f78733c1ecc7aaca32c34da2e29dc9ca45f7e2cb2f5c28ef3ff5f Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.797305 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.803915 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:10 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:10 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:10 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:10 crc kubenswrapper[5133]: I1121 13:45:10.804027 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.033462 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wt7r9"] Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.034876 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.042084 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.045938 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wt7r9"] Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.126298 5133 generic.go:334] "Generic (PLEG): container finished" podID="af22fa12-851f-4ec2-81f1-b3df1186e00c" containerID="d20c2728dcc96c4cd1306e3a9967d3012671b156ddf3797e7e37755333282238" exitCode=0 Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.126365 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" event={"ID":"af22fa12-851f-4ec2-81f1-b3df1186e00c","Type":"ContainerDied","Data":"d20c2728dcc96c4cd1306e3a9967d3012671b156ddf3797e7e37755333282238"} Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.132081 5133 generic.go:334] "Generic (PLEG): container finished" podID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerID="4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67" exitCode=0 Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.132237 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pm4gn" event={"ID":"8997b5b2-53b0-4b74-92db-7e82f3b44ea2","Type":"ContainerDied","Data":"4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67"} Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.132308 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pm4gn" event={"ID":"8997b5b2-53b0-4b74-92db-7e82f3b44ea2","Type":"ContainerStarted","Data":"d1acf04170539ac6265d1fc19c187e7a7ed3570c7c142327fcb9b28c6811b3c7"} Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.141730 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbj2q" event={"ID":"c292c11b-bba4-40c5-a591-ba3f896e610e","Type":"ContainerStarted","Data":"07f5f887ec4ae8b3d3a9aa3943dd23d751b95276b5c4c460cfad8c2958341242"} Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.141776 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbj2q" event={"ID":"c292c11b-bba4-40c5-a591-ba3f896e610e","Type":"ContainerStarted","Data":"00fa097bbe8f78733c1ecc7aaca32c34da2e29dc9ca45f7e2cb2f5c28ef3ff5f"} Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.153404 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2c33695a-1523-448d-990b-3b16e8902b8a","Type":"ContainerStarted","Data":"90879c3532dbda9d703319cda1c4f54b24d6cd9009c40b0baf533954456d3dbf"} Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.189242 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-catalog-content\") pod \"redhat-operators-wt7r9\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.189293 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cs8c\" (UniqueName: 
\"kubernetes.io/projected/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-kube-api-access-4cs8c\") pod \"redhat-operators-wt7r9\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.189339 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-utilities\") pod \"redhat-operators-wt7r9\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.294659 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-catalog-content\") pod \"redhat-operators-wt7r9\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.294708 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cs8c\" (UniqueName: \"kubernetes.io/projected/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-kube-api-access-4cs8c\") pod \"redhat-operators-wt7r9\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.294739 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-utilities\") pod \"redhat-operators-wt7r9\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.297591 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-utilities\") pod \"redhat-operators-wt7r9\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.298036 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-catalog-content\") pod \"redhat-operators-wt7r9\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.326393 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cs8c\" (UniqueName: \"kubernetes.io/projected/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-kube-api-access-4cs8c\") pod \"redhat-operators-wt7r9\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.361102 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.433410 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2z48l"] Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.434570 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.451051 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2z48l"] Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.604918 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlrsx\" (UniqueName: \"kubernetes.io/projected/a76889ae-3a7d-4d58-a381-9b8082db6f4a-kube-api-access-rlrsx\") pod \"redhat-operators-2z48l\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.605520 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-utilities\") pod \"redhat-operators-2z48l\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.605723 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-catalog-content\") pod \"redhat-operators-2z48l\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.707136 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-utilities\") pod \"redhat-operators-2z48l\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.707238 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-catalog-content\") pod \"redhat-operators-2z48l\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.707276 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlrsx\" (UniqueName: \"kubernetes.io/projected/a76889ae-3a7d-4d58-a381-9b8082db6f4a-kube-api-access-rlrsx\") pod \"redhat-operators-2z48l\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.707826 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-catalog-content\") pod \"redhat-operators-2z48l\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.708691 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-utilities\") pod \"redhat-operators-2z48l\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.728706 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-rlrsx\" (UniqueName: \"kubernetes.io/projected/a76889ae-3a7d-4d58-a381-9b8082db6f4a-kube-api-access-rlrsx\") pod \"redhat-operators-2z48l\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.798948 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:11 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:11 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:11 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.799032 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.810948 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:45:11 crc kubenswrapper[5133]: I1121 13:45:11.915531 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wt7r9"] Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.178017 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2z48l"] Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.178583 5133 generic.go:334] "Generic (PLEG): container finished" podID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerID="07f5f887ec4ae8b3d3a9aa3943dd23d751b95276b5c4c460cfad8c2958341242" exitCode=0 Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.178912 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbj2q" event={"ID":"c292c11b-bba4-40c5-a591-ba3f896e610e","Type":"ContainerDied","Data":"07f5f887ec4ae8b3d3a9aa3943dd23d751b95276b5c4c460cfad8c2958341242"} Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.186176 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2c33695a-1523-448d-990b-3b16e8902b8a","Type":"ContainerStarted","Data":"3650b524b41cd0b18b3f676c76061a9551840bd7b5f3fc269f1ee917dcfde4bc"} Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.191819 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wt7r9" event={"ID":"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc","Type":"ContainerStarted","Data":"a454ff51dfa0c2b9c281e28a28a5e5d2bc608efcf607ce20f6d99021a0db6331"} Nov 21 13:45:12 crc kubenswrapper[5133]: W1121 13:45:12.192478 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda76889ae_3a7d_4d58_a381_9b8082db6f4a.slice/crio-216bec72ce3f62e2d26b14e9683324f4dabc6f446780687291f76180e306a6d4 WatchSource:0}: Error finding container 216bec72ce3f62e2d26b14e9683324f4dabc6f446780687291f76180e306a6d4: Status 404 returned error can't find the container with id 216bec72ce3f62e2d26b14e9683324f4dabc6f446780687291f76180e306a6d4 Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.217857 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.217823933 podStartE2EDuration="3.217823933s" podCreationTimestamp="2025-11-21 13:45:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:12.217530745 +0000 UTC m=+172.015363003" watchObservedRunningTime="2025-11-21 13:45:12.217823933 +0000 UTC m=+172.015656181" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.518607 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.625723 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/af22fa12-851f-4ec2-81f1-b3df1186e00c-secret-volume\") pod \"af22fa12-851f-4ec2-81f1-b3df1186e00c\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.625796 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af22fa12-851f-4ec2-81f1-b3df1186e00c-config-volume\") pod \"af22fa12-851f-4ec2-81f1-b3df1186e00c\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.625873 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xk828\" (UniqueName: \"kubernetes.io/projected/af22fa12-851f-4ec2-81f1-b3df1186e00c-kube-api-access-xk828\") pod \"af22fa12-851f-4ec2-81f1-b3df1186e00c\" (UID: \"af22fa12-851f-4ec2-81f1-b3df1186e00c\") " Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.628057 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af22fa12-851f-4ec2-81f1-b3df1186e00c-config-volume" (OuterVolumeSpecName: "config-volume") pod "af22fa12-851f-4ec2-81f1-b3df1186e00c" (UID: "af22fa12-851f-4ec2-81f1-b3df1186e00c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.635510 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af22fa12-851f-4ec2-81f1-b3df1186e00c-kube-api-access-xk828" (OuterVolumeSpecName: "kube-api-access-xk828") pod "af22fa12-851f-4ec2-81f1-b3df1186e00c" (UID: "af22fa12-851f-4ec2-81f1-b3df1186e00c"). InnerVolumeSpecName "kube-api-access-xk828". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.635699 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af22fa12-851f-4ec2-81f1-b3df1186e00c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "af22fa12-851f-4ec2-81f1-b3df1186e00c" (UID: "af22fa12-851f-4ec2-81f1-b3df1186e00c"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.727160 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xk828\" (UniqueName: \"kubernetes.io/projected/af22fa12-851f-4ec2-81f1-b3df1186e00c-kube-api-access-xk828\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.727192 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/af22fa12-851f-4ec2-81f1-b3df1186e00c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.727201 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af22fa12-851f-4ec2-81f1-b3df1186e00c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.808800 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:12 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:12 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:12 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.808880 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.935371 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 21 13:45:12 crc kubenswrapper[5133]: E1121 13:45:12.935675 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af22fa12-851f-4ec2-81f1-b3df1186e00c" containerName="collect-profiles" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.935688 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="af22fa12-851f-4ec2-81f1-b3df1186e00c" containerName="collect-profiles" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.935885 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="af22fa12-851f-4ec2-81f1-b3df1186e00c" containerName="collect-profiles" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.936389 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.939600 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.939944 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 21 13:45:12 crc kubenswrapper[5133]: I1121 13:45:12.940026 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.033587 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6a752751-f0e9-4016-aeea-ca2df83e2981-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"6a752751-f0e9-4016-aeea-ca2df83e2981\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.033663 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a752751-f0e9-4016-aeea-ca2df83e2981-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"6a752751-f0e9-4016-aeea-ca2df83e2981\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.135268 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6a752751-f0e9-4016-aeea-ca2df83e2981-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"6a752751-f0e9-4016-aeea-ca2df83e2981\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.135503 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a752751-f0e9-4016-aeea-ca2df83e2981-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"6a752751-f0e9-4016-aeea-ca2df83e2981\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.135604 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6a752751-f0e9-4016-aeea-ca2df83e2981-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"6a752751-f0e9-4016-aeea-ca2df83e2981\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.154152 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a752751-f0e9-4016-aeea-ca2df83e2981-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"6a752751-f0e9-4016-aeea-ca2df83e2981\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.205229 5133 generic.go:334] "Generic (PLEG): container finished" podID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerID="c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035" exitCode=0 Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.205322 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wt7r9" event={"ID":"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc","Type":"ContainerDied","Data":"c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035"} Nov 21 
13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.208138 5133 generic.go:334] "Generic (PLEG): container finished" podID="2c33695a-1523-448d-990b-3b16e8902b8a" containerID="3650b524b41cd0b18b3f676c76061a9551840bd7b5f3fc269f1ee917dcfde4bc" exitCode=0 Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.208242 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2c33695a-1523-448d-990b-3b16e8902b8a","Type":"ContainerDied","Data":"3650b524b41cd0b18b3f676c76061a9551840bd7b5f3fc269f1ee917dcfde4bc"} Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.215575 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2z48l" event={"ID":"a76889ae-3a7d-4d58-a381-9b8082db6f4a","Type":"ContainerStarted","Data":"8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0"} Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.215629 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2z48l" event={"ID":"a76889ae-3a7d-4d58-a381-9b8082db6f4a","Type":"ContainerStarted","Data":"216bec72ce3f62e2d26b14e9683324f4dabc6f446780687291f76180e306a6d4"} Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.217420 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" event={"ID":"af22fa12-851f-4ec2-81f1-b3df1186e00c","Type":"ContainerDied","Data":"d7f08fb427180d9a6b3a0ffa16ebe453ab2e4b5860112f2f70178a6d7d63d5d4"} Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.217476 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d7f08fb427180d9a6b3a0ffa16ebe453ab2e4b5860112f2f70178a6d7d63d5d4" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.217522 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.302132 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.557915 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 21 13:45:13 crc kubenswrapper[5133]: W1121 13:45:13.568546 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod6a752751_f0e9_4016_aeea_ca2df83e2981.slice/crio-0d5ea965f8cb6328752359c276ec7a7386cf655259ca2e6aba2487d05173cb27 WatchSource:0}: Error finding container 0d5ea965f8cb6328752359c276ec7a7386cf655259ca2e6aba2487d05173cb27: Status 404 returned error can't find the container with id 0d5ea965f8cb6328752359c276ec7a7386cf655259ca2e6aba2487d05173cb27 Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.798801 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:13 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:13 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:13 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:13 crc kubenswrapper[5133]: I1121 13:45:13.799292 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.229607 5133 generic.go:334] "Generic (PLEG): container finished" podID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerID="8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0" exitCode=0 Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.229697 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2z48l" event={"ID":"a76889ae-3a7d-4d58-a381-9b8082db6f4a","Type":"ContainerDied","Data":"8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0"} Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.231452 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"6a752751-f0e9-4016-aeea-ca2df83e2981","Type":"ContainerStarted","Data":"0d5ea965f8cb6328752359c276ec7a7386cf655259ca2e6aba2487d05173cb27"} Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.532412 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.667546 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c33695a-1523-448d-990b-3b16e8902b8a-kube-api-access\") pod \"2c33695a-1523-448d-990b-3b16e8902b8a\" (UID: \"2c33695a-1523-448d-990b-3b16e8902b8a\") " Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.667644 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c33695a-1523-448d-990b-3b16e8902b8a-kubelet-dir\") pod \"2c33695a-1523-448d-990b-3b16e8902b8a\" (UID: \"2c33695a-1523-448d-990b-3b16e8902b8a\") " Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.668259 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2c33695a-1523-448d-990b-3b16e8902b8a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2c33695a-1523-448d-990b-3b16e8902b8a" (UID: "2c33695a-1523-448d-990b-3b16e8902b8a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.675737 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c33695a-1523-448d-990b-3b16e8902b8a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2c33695a-1523-448d-990b-3b16e8902b8a" (UID: "2c33695a-1523-448d-990b-3b16e8902b8a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.769730 5133 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2c33695a-1523-448d-990b-3b16e8902b8a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.769775 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2c33695a-1523-448d-990b-3b16e8902b8a-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.802883 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:14 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:14 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:14 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.803060 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.955848 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:45:14 crc kubenswrapper[5133]: I1121 13:45:14.961119 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-dn6hh" Nov 21 13:45:15 crc kubenswrapper[5133]: I1121 13:45:15.243560 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2c33695a-1523-448d-990b-3b16e8902b8a","Type":"ContainerDied","Data":"90879c3532dbda9d703319cda1c4f54b24d6cd9009c40b0baf533954456d3dbf"} Nov 21 13:45:15 crc kubenswrapper[5133]: I1121 13:45:15.243689 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90879c3532dbda9d703319cda1c4f54b24d6cd9009c40b0baf533954456d3dbf" Nov 21 13:45:15 crc kubenswrapper[5133]: I1121 13:45:15.243600 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 21 13:45:15 crc kubenswrapper[5133]: I1121 13:45:15.279473 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:45:15 crc kubenswrapper[5133]: I1121 13:45:15.283648 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b3aabda0-97d9-4886-8909-1c423c4d3238-metrics-certs\") pod \"network-metrics-daemon-x5wnh\" (UID: \"b3aabda0-97d9-4886-8909-1c423c4d3238\") " pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:45:15 crc kubenswrapper[5133]: I1121 13:45:15.311152 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-x5wnh" Nov 21 13:45:15 crc kubenswrapper[5133]: I1121 13:45:15.596145 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-x5wnh"] Nov 21 13:45:15 crc kubenswrapper[5133]: I1121 13:45:15.797625 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:15 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:15 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:15 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:15 crc kubenswrapper[5133]: I1121 13:45:15.797700 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:16 crc kubenswrapper[5133]: I1121 13:45:16.009919 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-9qx4w" Nov 21 13:45:16 crc kubenswrapper[5133]: I1121 13:45:16.275194 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" event={"ID":"b3aabda0-97d9-4886-8909-1c423c4d3238","Type":"ContainerStarted","Data":"3f393b2347132e8908f6932372c5e0763a0ce8a1080b5070ac7d9a168f445bad"} Nov 21 13:45:16 crc kubenswrapper[5133]: I1121 13:45:16.275262 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" event={"ID":"b3aabda0-97d9-4886-8909-1c423c4d3238","Type":"ContainerStarted","Data":"076541731f5308c7b764727bbcf4350f44bf3dbc67e0de2041569ff2de33a679"} Nov 21 13:45:16 crc kubenswrapper[5133]: I1121 13:45:16.290030 5133 generic.go:334] "Generic (PLEG): container finished" 
podID="6a752751-f0e9-4016-aeea-ca2df83e2981" containerID="7edbcc78b39ba5d87cb4461cd97559267e10a46afb335c2faec7d5dde5bdf90c" exitCode=0 Nov 21 13:45:16 crc kubenswrapper[5133]: I1121 13:45:16.290159 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"6a752751-f0e9-4016-aeea-ca2df83e2981","Type":"ContainerDied","Data":"7edbcc78b39ba5d87cb4461cd97559267e10a46afb335c2faec7d5dde5bdf90c"} Nov 21 13:45:16 crc kubenswrapper[5133]: I1121 13:45:16.798703 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:16 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:16 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:16 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:16 crc kubenswrapper[5133]: I1121 13:45:16.799229 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:17 crc kubenswrapper[5133]: I1121 13:45:17.804036 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:17 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:17 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:17 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:17 crc kubenswrapper[5133]: I1121 13:45:17.804096 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:18 crc kubenswrapper[5133]: I1121 13:45:18.797278 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:18 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:18 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:18 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:18 crc kubenswrapper[5133]: I1121 13:45:18.797630 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:19 crc kubenswrapper[5133]: I1121 13:45:19.697718 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:19 crc kubenswrapper[5133]: I1121 13:45:19.697774 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" 
output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:19 crc kubenswrapper[5133]: I1121 13:45:19.697818 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:19 crc kubenswrapper[5133]: I1121 13:45:19.697897 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:19 crc kubenswrapper[5133]: I1121 13:45:19.797437 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:19 crc kubenswrapper[5133]: [-]has-synced failed: reason withheld Nov 21 13:45:19 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:19 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:19 crc kubenswrapper[5133]: I1121 13:45:19.797516 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:20 crc kubenswrapper[5133]: I1121 13:45:20.061840 5133 patch_prober.go:28] interesting pod/console-f9d7485db-m54n8 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 21 13:45:20 crc kubenswrapper[5133]: I1121 13:45:20.061903 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-m54n8" podUID="d1ec861e-fbe3-412e-9885-43a9e3c5be1e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 21 13:45:20 crc kubenswrapper[5133]: I1121 13:45:20.796156 5133 patch_prober.go:28] interesting pod/router-default-5444994796-x7zf8 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 21 13:45:20 crc kubenswrapper[5133]: [+]has-synced ok Nov 21 13:45:20 crc kubenswrapper[5133]: [+]process-running ok Nov 21 13:45:20 crc kubenswrapper[5133]: healthz check failed Nov 21 13:45:20 crc kubenswrapper[5133]: I1121 13:45:20.796231 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-x7zf8" podUID="dc4b9d28-3c54-466b-b41d-0f1381490b02" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:21 crc kubenswrapper[5133]: I1121 13:45:21.797698 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:21 crc kubenswrapper[5133]: I1121 13:45:21.800354 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-x7zf8" Nov 21 13:45:23 crc kubenswrapper[5133]: I1121 
13:45:23.311128 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:45:23 crc kubenswrapper[5133]: I1121 13:45:23.311243 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:45:24 crc kubenswrapper[5133]: I1121 13:45:24.751805 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:24 crc kubenswrapper[5133]: I1121 13:45:24.837945 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6a752751-f0e9-4016-aeea-ca2df83e2981-kubelet-dir\") pod \"6a752751-f0e9-4016-aeea-ca2df83e2981\" (UID: \"6a752751-f0e9-4016-aeea-ca2df83e2981\") " Nov 21 13:45:24 crc kubenswrapper[5133]: I1121 13:45:24.838081 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a752751-f0e9-4016-aeea-ca2df83e2981-kube-api-access\") pod \"6a752751-f0e9-4016-aeea-ca2df83e2981\" (UID: \"6a752751-f0e9-4016-aeea-ca2df83e2981\") " Nov 21 13:45:24 crc kubenswrapper[5133]: I1121 13:45:24.838099 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6a752751-f0e9-4016-aeea-ca2df83e2981-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6a752751-f0e9-4016-aeea-ca2df83e2981" (UID: "6a752751-f0e9-4016-aeea-ca2df83e2981"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:45:24 crc kubenswrapper[5133]: I1121 13:45:24.838398 5133 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6a752751-f0e9-4016-aeea-ca2df83e2981-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:24 crc kubenswrapper[5133]: I1121 13:45:24.844831 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a752751-f0e9-4016-aeea-ca2df83e2981-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6a752751-f0e9-4016-aeea-ca2df83e2981" (UID: "6a752751-f0e9-4016-aeea-ca2df83e2981"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:45:24 crc kubenswrapper[5133]: I1121 13:45:24.939841 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6a752751-f0e9-4016-aeea-ca2df83e2981-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 21 13:45:25 crc kubenswrapper[5133]: I1121 13:45:25.359232 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"6a752751-f0e9-4016-aeea-ca2df83e2981","Type":"ContainerDied","Data":"0d5ea965f8cb6328752359c276ec7a7386cf655259ca2e6aba2487d05173cb27"} Nov 21 13:45:25 crc kubenswrapper[5133]: I1121 13:45:25.359294 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d5ea965f8cb6328752359c276ec7a7386cf655259ca2e6aba2487d05173cb27" Nov 21 13:45:25 crc kubenswrapper[5133]: I1121 13:45:25.359606 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 21 13:45:26 crc kubenswrapper[5133]: I1121 13:45:26.369767 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-x5wnh" event={"ID":"b3aabda0-97d9-4886-8909-1c423c4d3238","Type":"ContainerStarted","Data":"c175b2ead2bf8a3234a8deeb3ac59d9b50324a554d4b5bcee50369843a5137b6"} Nov 21 13:45:27 crc kubenswrapper[5133]: I1121 13:45:27.395668 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-x5wnh" podStartSLOduration=155.395641231 podStartE2EDuration="2m35.395641231s" podCreationTimestamp="2025-11-21 13:42:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:45:27.392796155 +0000 UTC m=+187.190628413" watchObservedRunningTime="2025-11-21 13:45:27.395641231 +0000 UTC m=+187.193473519" Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.486141 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.697674 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.697757 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.697790 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.697844 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection 
refused" Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.697897 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-7jhbl" Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.698587 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.698538 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"c5d280b0aaabc91d3083c2ef14bc9971e16b2c1ced1bbcefe113ed2dca6c96cf"} pod="openshift-console/downloads-7954f5f757-7jhbl" containerMessage="Container download-server failed liveness probe, will be restarted" Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.698623 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:29 crc kubenswrapper[5133]: I1121 13:45:29.698641 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" containerID="cri-o://c5d280b0aaabc91d3083c2ef14bc9971e16b2c1ced1bbcefe113ed2dca6c96cf" gracePeriod=2 Nov 21 13:45:30 crc kubenswrapper[5133]: I1121 13:45:30.066811 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:45:30 crc kubenswrapper[5133]: I1121 13:45:30.077892 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:45:31 crc kubenswrapper[5133]: I1121 13:45:31.413430 5133 generic.go:334] "Generic (PLEG): container finished" podID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerID="c5d280b0aaabc91d3083c2ef14bc9971e16b2c1ced1bbcefe113ed2dca6c96cf" exitCode=0 Nov 21 13:45:31 crc kubenswrapper[5133]: I1121 13:45:31.413517 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7jhbl" event={"ID":"bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8","Type":"ContainerDied","Data":"c5d280b0aaabc91d3083c2ef14bc9971e16b2c1ced1bbcefe113ed2dca6c96cf"} Nov 21 13:45:36 crc kubenswrapper[5133]: I1121 13:45:36.409239 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 21 13:45:39 crc kubenswrapper[5133]: I1121 13:45:39.697712 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:39 crc kubenswrapper[5133]: I1121 13:45:39.698324 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:40 crc kubenswrapper[5133]: 
I1121 13:45:40.467598 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-bhlrh" Nov 21 13:45:42 crc kubenswrapper[5133]: E1121 13:45:42.773878 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 21 13:45:42 crc kubenswrapper[5133]: E1121 13:45:42.774206 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5ffjm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-rfbh2_openshift-marketplace(a6b4a203-3329-4e08-b2d6-61ec7ee18e7e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 13:45:42 crc kubenswrapper[5133]: E1121 13:45:42.775499 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-rfbh2" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" Nov 21 13:45:48 crc kubenswrapper[5133]: E1121 13:45:48.840842 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-rfbh2" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" Nov 21 13:45:49 crc kubenswrapper[5133]: I1121 13:45:49.699410 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:49 crc 
kubenswrapper[5133]: I1121 13:45:49.699510 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:45:50 crc kubenswrapper[5133]: I1121 13:45:50.500651 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="hostpath-provisioner/csi-hostpathplugin-9zjjz" podUID="63aa8ca7-9e72-4f3c-85e9-b9a423371b21" containerName="hostpath-provisioner" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 21 13:45:53 crc kubenswrapper[5133]: I1121 13:45:53.310543 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:45:53 crc kubenswrapper[5133]: I1121 13:45:53.310654 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:45:53 crc kubenswrapper[5133]: I1121 13:45:53.310744 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:45:53 crc kubenswrapper[5133]: I1121 13:45:53.312303 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 13:45:53 crc kubenswrapper[5133]: I1121 13:45:53.312818 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745" gracePeriod=600 Nov 21 13:45:53 crc kubenswrapper[5133]: E1121 13:45:53.937172 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 21 13:45:53 crc kubenswrapper[5133]: E1121 13:45:53.937476 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-glkqz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-xp47m_openshift-marketplace(ce3b64c0-a2cb-416d-9c9a-14c10514bfac): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 13:45:53 crc kubenswrapper[5133]: E1121 13:45:53.938601 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-xp47m" podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" Nov 21 13:45:53 crc kubenswrapper[5133]: E1121 13:45:53.941200 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 21 13:45:53 crc kubenswrapper[5133]: E1121 13:45:53.941459 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4jjqm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-b9rk5_openshift-marketplace(94664536-aa38-4b5b-b7b2-fa28904da365): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 13:45:53 crc kubenswrapper[5133]: E1121 13:45:53.943960 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-b9rk5" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" Nov 21 13:45:53 crc kubenswrapper[5133]: E1121 13:45:53.952232 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 21 13:45:53 crc kubenswrapper[5133]: E1121 13:45:53.952420 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zkw7w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-w6r59_openshift-marketplace(297f6c8b-efd5-4df1-901e-d66882a776c5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 13:45:53 crc kubenswrapper[5133]: E1121 13:45:53.954205 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-w6r59" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" Nov 21 13:45:57 crc kubenswrapper[5133]: I1121 13:45:57.601462 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745" exitCode=0 Nov 21 13:45:57 crc kubenswrapper[5133]: I1121 13:45:57.601556 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745"} Nov 21 13:45:59 crc kubenswrapper[5133]: I1121 13:45:59.700422 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:45:59 crc kubenswrapper[5133]: I1121 13:45:59.700515 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:46:03 crc kubenswrapper[5133]: E1121 13:46:03.797704 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-w6r59" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" Nov 21 13:46:03 crc kubenswrapper[5133]: E1121 13:46:03.797858 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-b9rk5" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" Nov 21 13:46:03 crc kubenswrapper[5133]: E1121 13:46:03.798164 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-xp47m" podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" Nov 21 13:46:08 crc kubenswrapper[5133]: E1121 13:46:08.952397 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 21 13:46:08 crc kubenswrapper[5133]: E1121 13:46:08.953157 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c945m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-pm4gn_openshift-marketplace(8997b5b2-53b0-4b74-92db-7e82f3b44ea2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 13:46:08 crc kubenswrapper[5133]: E1121 13:46:08.954425 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-pm4gn" 
podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" Nov 21 13:46:09 crc kubenswrapper[5133]: I1121 13:46:09.697925 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:46:09 crc kubenswrapper[5133]: I1121 13:46:09.698069 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:46:13 crc kubenswrapper[5133]: E1121 13:46:13.785203 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-pm4gn" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" Nov 21 13:46:13 crc kubenswrapper[5133]: E1121 13:46:13.806283 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 21 13:46:13 crc kubenswrapper[5133]: E1121 13:46:13.806730 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bjwfx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-bbj2q_openshift-marketplace(c292c11b-bba4-40c5-a591-ba3f896e610e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 13:46:13 crc kubenswrapper[5133]: E1121 13:46:13.808455 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: 
\"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-bbj2q" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" Nov 21 13:46:19 crc kubenswrapper[5133]: I1121 13:46:19.699112 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:46:19 crc kubenswrapper[5133]: I1121 13:46:19.699880 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:46:26 crc kubenswrapper[5133]: E1121 13:46:26.374850 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 21 13:46:26 crc kubenswrapper[5133]: E1121 13:46:26.375586 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4cs8c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-wt7r9_openshift-marketplace(cb05a95a-9c39-4c50-b64b-91d20d5d5ebc): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 13:46:26 crc kubenswrapper[5133]: E1121 13:46:26.377235 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-wt7r9" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" Nov 21 13:46:29 crc 
kubenswrapper[5133]: E1121 13:46:29.454974 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 21 13:46:29 crc kubenswrapper[5133]: E1121 13:46:29.456385 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rlrsx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-2z48l_openshift-marketplace(a76889ae-3a7d-4d58-a381-9b8082db6f4a): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 21 13:46:29 crc kubenswrapper[5133]: E1121 13:46:29.457730 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-2z48l" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" Nov 21 13:46:29 crc kubenswrapper[5133]: I1121 13:46:29.698429 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:46:29 crc kubenswrapper[5133]: I1121 13:46:29.699292 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:46:29 crc kubenswrapper[5133]: I1121 13:46:29.851154 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7jhbl" 
event={"ID":"bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8","Type":"ContainerStarted","Data":"92ec47599766abd340e321190b0e4b2b956797889ea9ad505d1c465a6320406c"} Nov 21 13:46:29 crc kubenswrapper[5133]: I1121 13:46:29.851406 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7jhbl" Nov 21 13:46:29 crc kubenswrapper[5133]: I1121 13:46:29.853347 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:46:29 crc kubenswrapper[5133]: I1121 13:46:29.853431 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:46:29 crc kubenswrapper[5133]: I1121 13:46:29.858487 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"dd93f35af870e12207633edd0793e79b2dcd5d4e2167c242b49f29a86ebd07a8"} Nov 21 13:46:30 crc kubenswrapper[5133]: E1121 13:46:30.075776 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-2z48l" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" Nov 21 13:46:30 crc kubenswrapper[5133]: I1121 13:46:30.877318 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbj2q" event={"ID":"c292c11b-bba4-40c5-a591-ba3f896e610e","Type":"ContainerStarted","Data":"7fc9eaed7c2d55bcb144595c8736c6bd3b3541f9274989efa22ace1c794d99d2"} Nov 21 13:46:30 crc kubenswrapper[5133]: I1121 13:46:30.879183 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b9rk5" event={"ID":"94664536-aa38-4b5b-b7b2-fa28904da365","Type":"ContainerStarted","Data":"b41c2568cdc3bb24b2d7e89bc42e4d2a614dc4bbedaff8c99ec5bbb8a92535e5"} Nov 21 13:46:30 crc kubenswrapper[5133]: I1121 13:46:30.881588 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp47m" event={"ID":"ce3b64c0-a2cb-416d-9c9a-14c10514bfac","Type":"ContainerStarted","Data":"86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc"} Nov 21 13:46:30 crc kubenswrapper[5133]: I1121 13:46:30.884136 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6r59" event={"ID":"297f6c8b-efd5-4df1-901e-d66882a776c5","Type":"ContainerStarted","Data":"ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37"} Nov 21 13:46:30 crc kubenswrapper[5133]: I1121 13:46:30.886048 5133 generic.go:334] "Generic (PLEG): container finished" podID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerID="52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0" exitCode=0 Nov 21 13:46:30 crc kubenswrapper[5133]: I1121 13:46:30.886098 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfbh2" 
event={"ID":"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e","Type":"ContainerDied","Data":"52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0"} Nov 21 13:46:30 crc kubenswrapper[5133]: I1121 13:46:30.890213 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pm4gn" event={"ID":"8997b5b2-53b0-4b74-92db-7e82f3b44ea2","Type":"ContainerStarted","Data":"cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50"} Nov 21 13:46:30 crc kubenswrapper[5133]: I1121 13:46:30.891073 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:46:30 crc kubenswrapper[5133]: I1121 13:46:30.891130 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.897451 5133 generic.go:334] "Generic (PLEG): container finished" podID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerID="cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50" exitCode=0 Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.898151 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pm4gn" event={"ID":"8997b5b2-53b0-4b74-92db-7e82f3b44ea2","Type":"ContainerDied","Data":"cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50"} Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.901885 5133 generic.go:334] "Generic (PLEG): container finished" podID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerID="7fc9eaed7c2d55bcb144595c8736c6bd3b3541f9274989efa22ace1c794d99d2" exitCode=0 Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.901949 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbj2q" event={"ID":"c292c11b-bba4-40c5-a591-ba3f896e610e","Type":"ContainerDied","Data":"7fc9eaed7c2d55bcb144595c8736c6bd3b3541f9274989efa22ace1c794d99d2"} Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.905887 5133 generic.go:334] "Generic (PLEG): container finished" podID="94664536-aa38-4b5b-b7b2-fa28904da365" containerID="b41c2568cdc3bb24b2d7e89bc42e4d2a614dc4bbedaff8c99ec5bbb8a92535e5" exitCode=0 Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.905946 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b9rk5" event={"ID":"94664536-aa38-4b5b-b7b2-fa28904da365","Type":"ContainerDied","Data":"b41c2568cdc3bb24b2d7e89bc42e4d2a614dc4bbedaff8c99ec5bbb8a92535e5"} Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.908602 5133 generic.go:334] "Generic (PLEG): container finished" podID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerID="86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc" exitCode=0 Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.908663 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp47m" event={"ID":"ce3b64c0-a2cb-416d-9c9a-14c10514bfac","Type":"ContainerDied","Data":"86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc"} Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.914223 5133 
generic.go:334] "Generic (PLEG): container finished" podID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerID="ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37" exitCode=0 Nov 21 13:46:31 crc kubenswrapper[5133]: I1121 13:46:31.914292 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6r59" event={"ID":"297f6c8b-efd5-4df1-901e-d66882a776c5","Type":"ContainerDied","Data":"ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37"} Nov 21 13:46:35 crc kubenswrapper[5133]: I1121 13:46:35.956187 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfbh2" event={"ID":"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e","Type":"ContainerStarted","Data":"2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a"} Nov 21 13:46:35 crc kubenswrapper[5133]: I1121 13:46:35.959354 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pm4gn" event={"ID":"8997b5b2-53b0-4b74-92db-7e82f3b44ea2","Type":"ContainerStarted","Data":"e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8"} Nov 21 13:46:36 crc kubenswrapper[5133]: I1121 13:46:36.997579 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pm4gn" podStartSLOduration=4.396656129 podStartE2EDuration="1m27.997556544s" podCreationTimestamp="2025-11-21 13:45:09 +0000 UTC" firstStartedPulling="2025-11-21 13:45:11.180384103 +0000 UTC m=+170.978216351" lastFinishedPulling="2025-11-21 13:46:34.781284508 +0000 UTC m=+254.579116766" observedRunningTime="2025-11-21 13:46:36.996221219 +0000 UTC m=+256.794053517" watchObservedRunningTime="2025-11-21 13:46:36.997556544 +0000 UTC m=+256.795388812" Nov 21 13:46:37 crc kubenswrapper[5133]: I1121 13:46:37.028461 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rfbh2" podStartSLOduration=5.631407112 podStartE2EDuration="1m30.028433009s" podCreationTimestamp="2025-11-21 13:45:07 +0000 UTC" firstStartedPulling="2025-11-21 13:45:09.066817846 +0000 UTC m=+168.864650094" lastFinishedPulling="2025-11-21 13:46:33.463843743 +0000 UTC m=+253.261675991" observedRunningTime="2025-11-21 13:46:37.024417473 +0000 UTC m=+256.822249731" watchObservedRunningTime="2025-11-21 13:46:37.028433009 +0000 UTC m=+256.826265297" Nov 21 13:46:37 crc kubenswrapper[5133]: I1121 13:46:37.968328 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:46:37 crc kubenswrapper[5133]: I1121 13:46:37.968416 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:46:39 crc kubenswrapper[5133]: I1121 13:46:39.005953 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6r59" event={"ID":"297f6c8b-efd5-4df1-901e-d66882a776c5","Type":"ContainerStarted","Data":"d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178"} Nov 21 13:46:39 crc kubenswrapper[5133]: I1121 13:46:39.698484 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:46:39 crc kubenswrapper[5133]: I1121 13:46:39.698909 5133 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:46:39 crc kubenswrapper[5133]: I1121 13:46:39.698484 5133 patch_prober.go:28] interesting pod/downloads-7954f5f757-7jhbl container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body= Nov 21 13:46:39 crc kubenswrapper[5133]: I1121 13:46:39.698991 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7jhbl" podUID="bb29fa4c-cfd8-45b5-a0d3-895fb20d5cb8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.32:8080/\": dial tcp 10.217.0.32:8080: connect: connection refused" Nov 21 13:46:39 crc kubenswrapper[5133]: I1121 13:46:39.975328 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-rfbh2" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerName="registry-server" probeResult="failure" output=< Nov 21 13:46:39 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 13:46:39 crc kubenswrapper[5133]: > Nov 21 13:46:40 crc kubenswrapper[5133]: I1121 13:46:40.012522 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:46:40 crc kubenswrapper[5133]: I1121 13:46:40.012600 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:46:40 crc kubenswrapper[5133]: I1121 13:46:40.037958 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-w6r59" podStartSLOduration=3.252990832 podStartE2EDuration="1m32.037942596s" podCreationTimestamp="2025-11-21 13:45:08 +0000 UTC" firstStartedPulling="2025-11-21 13:45:09.064788122 +0000 UTC m=+168.862620370" lastFinishedPulling="2025-11-21 13:46:37.849739846 +0000 UTC m=+257.647572134" observedRunningTime="2025-11-21 13:46:40.036292193 +0000 UTC m=+259.834124471" watchObservedRunningTime="2025-11-21 13:46:40.037942596 +0000 UTC m=+259.835774834" Nov 21 13:46:41 crc kubenswrapper[5133]: I1121 13:46:41.051949 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-pm4gn" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerName="registry-server" probeResult="failure" output=< Nov 21 13:46:41 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 13:46:41 crc kubenswrapper[5133]: > Nov 21 13:46:48 crc kubenswrapper[5133]: I1121 13:46:48.126957 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:46:48 crc kubenswrapper[5133]: I1121 13:46:48.177708 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:46:48 crc kubenswrapper[5133]: I1121 13:46:48.418452 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:46:48 crc kubenswrapper[5133]: I1121 13:46:48.418610 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:46:48 crc kubenswrapper[5133]: I1121 13:46:48.476097 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:46:49 crc kubenswrapper[5133]: I1121 13:46:49.132859 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:46:49 crc kubenswrapper[5133]: I1121 13:46:49.726802 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-7jhbl" Nov 21 13:46:50 crc kubenswrapper[5133]: I1121 13:46:50.088025 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:46:50 crc kubenswrapper[5133]: I1121 13:46:50.143717 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:46:50 crc kubenswrapper[5133]: I1121 13:46:50.571863 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w6r59"] Nov 21 13:46:51 crc kubenswrapper[5133]: I1121 13:46:51.083709 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-w6r59" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerName="registry-server" containerID="cri-o://d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178" gracePeriod=2 Nov 21 13:46:52 crc kubenswrapper[5133]: I1121 13:46:52.895210 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.040031 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkw7w\" (UniqueName: \"kubernetes.io/projected/297f6c8b-efd5-4df1-901e-d66882a776c5-kube-api-access-zkw7w\") pod \"297f6c8b-efd5-4df1-901e-d66882a776c5\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.040392 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-catalog-content\") pod \"297f6c8b-efd5-4df1-901e-d66882a776c5\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.040539 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-utilities\") pod \"297f6c8b-efd5-4df1-901e-d66882a776c5\" (UID: \"297f6c8b-efd5-4df1-901e-d66882a776c5\") " Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.041576 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-utilities" (OuterVolumeSpecName: "utilities") pod "297f6c8b-efd5-4df1-901e-d66882a776c5" (UID: "297f6c8b-efd5-4df1-901e-d66882a776c5"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.046354 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/297f6c8b-efd5-4df1-901e-d66882a776c5-kube-api-access-zkw7w" (OuterVolumeSpecName: "kube-api-access-zkw7w") pod "297f6c8b-efd5-4df1-901e-d66882a776c5" (UID: "297f6c8b-efd5-4df1-901e-d66882a776c5"). InnerVolumeSpecName "kube-api-access-zkw7w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.097289 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wt7r9" event={"ID":"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc","Type":"ContainerStarted","Data":"a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f"} Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.099308 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "297f6c8b-efd5-4df1-901e-d66882a776c5" (UID: "297f6c8b-efd5-4df1-901e-d66882a776c5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.099674 5133 generic.go:334] "Generic (PLEG): container finished" podID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerID="d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178" exitCode=0 Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.099709 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6r59" event={"ID":"297f6c8b-efd5-4df1-901e-d66882a776c5","Type":"ContainerDied","Data":"d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178"} Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.099746 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w6r59" event={"ID":"297f6c8b-efd5-4df1-901e-d66882a776c5","Type":"ContainerDied","Data":"16b5c9b8939d799676b296e1a3be23df28e5ec38b08a13bdc59123692dc8587b"} Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.099770 5133 scope.go:117] "RemoveContainer" containerID="d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178" Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.099810 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-w6r59" Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.104062 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbj2q" event={"ID":"c292c11b-bba4-40c5-a591-ba3f896e610e","Type":"ContainerStarted","Data":"42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb"} Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.110154 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b9rk5" event={"ID":"94664536-aa38-4b5b-b7b2-fa28904da365","Type":"ContainerStarted","Data":"91035a622d42c138a9d506a786c79938cbc750b3a3455d468d31dbe0d6659caa"} Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.112139 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp47m" event={"ID":"ce3b64c0-a2cb-416d-9c9a-14c10514bfac","Type":"ContainerStarted","Data":"32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4"} Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.129072 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w6r59"] Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.137636 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-w6r59"] Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.142239 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.142275 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/297f6c8b-efd5-4df1-901e-d66882a776c5-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:46:53 crc kubenswrapper[5133]: I1121 13:46:53.142285 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkw7w\" (UniqueName: \"kubernetes.io/projected/297f6c8b-efd5-4df1-901e-d66882a776c5-kube-api-access-zkw7w\") on node \"crc\" DevicePath \"\"" Nov 21 13:46:54 crc kubenswrapper[5133]: I1121 13:46:54.462922 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" path="/var/lib/kubelet/pods/297f6c8b-efd5-4df1-901e-d66882a776c5/volumes" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.103359 5133 scope.go:117] "RemoveContainer" containerID="ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.126255 5133 generic.go:334] "Generic (PLEG): container finished" podID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerID="a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f" exitCode=0 Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.126329 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wt7r9" event={"ID":"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc","Type":"ContainerDied","Data":"a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f"} Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.133952 5133 scope.go:117] "RemoveContainer" containerID="79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.155223 5133 scope.go:117] "RemoveContainer" 
containerID="d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178" Nov 21 13:46:55 crc kubenswrapper[5133]: E1121 13:46:55.155721 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178\": container with ID starting with d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178 not found: ID does not exist" containerID="d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.155766 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178"} err="failed to get container status \"d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178\": rpc error: code = NotFound desc = could not find container \"d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178\": container with ID starting with d24e2d336a52a941a92c4bfbb6fd3021de0c375396e5f82f24b85d2d16e51178 not found: ID does not exist" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.155796 5133 scope.go:117] "RemoveContainer" containerID="ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37" Nov 21 13:46:55 crc kubenswrapper[5133]: E1121 13:46:55.156107 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37\": container with ID starting with ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37 not found: ID does not exist" containerID="ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.156130 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37"} err="failed to get container status \"ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37\": rpc error: code = NotFound desc = could not find container \"ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37\": container with ID starting with ee71c4446d5429d79f2a22fa204673853e3755fff8c17082ac9db397f8722b37 not found: ID does not exist" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.156149 5133 scope.go:117] "RemoveContainer" containerID="79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8" Nov 21 13:46:55 crc kubenswrapper[5133]: E1121 13:46:55.156418 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8\": container with ID starting with 79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8 not found: ID does not exist" containerID="79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.156452 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8"} err="failed to get container status \"79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8\": rpc error: code = NotFound desc = could not find container \"79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8\": container with ID starting with 
79c1988725f1acd3f69110e5a469518dba6c71edea32306c440fdfe9d96d2dd8 not found: ID does not exist" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.171220 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-b9rk5" podStartSLOduration=4.666782146 podStartE2EDuration="1m47.171196381s" podCreationTimestamp="2025-11-21 13:45:08 +0000 UTC" firstStartedPulling="2025-11-21 13:45:10.116241608 +0000 UTC m=+169.914073856" lastFinishedPulling="2025-11-21 13:46:52.620655843 +0000 UTC m=+272.418488091" observedRunningTime="2025-11-21 13:46:55.160784861 +0000 UTC m=+274.958617109" watchObservedRunningTime="2025-11-21 13:46:55.171196381 +0000 UTC m=+274.969028629" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.199715 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xp47m" podStartSLOduration=16.292547481 podStartE2EDuration="1m48.199694858s" podCreationTimestamp="2025-11-21 13:45:07 +0000 UTC" firstStartedPulling="2025-11-21 13:45:09.062922222 +0000 UTC m=+168.860754470" lastFinishedPulling="2025-11-21 13:46:40.970069599 +0000 UTC m=+260.767901847" observedRunningTime="2025-11-21 13:46:55.189623338 +0000 UTC m=+274.987455586" watchObservedRunningTime="2025-11-21 13:46:55.199694858 +0000 UTC m=+274.997527106" Nov 21 13:46:55 crc kubenswrapper[5133]: I1121 13:46:55.206669 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bbj2q" podStartSLOduration=4.733499483 podStartE2EDuration="1m45.206646678s" podCreationTimestamp="2025-11-21 13:45:10 +0000 UTC" firstStartedPulling="2025-11-21 13:45:12.186056565 +0000 UTC m=+171.983888813" lastFinishedPulling="2025-11-21 13:46:52.65920374 +0000 UTC m=+272.457036008" observedRunningTime="2025-11-21 13:46:55.203584009 +0000 UTC m=+275.001416257" watchObservedRunningTime="2025-11-21 13:46:55.206646678 +0000 UTC m=+275.004478916" Nov 21 13:46:58 crc kubenswrapper[5133]: I1121 13:46:58.175226 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:46:58 crc kubenswrapper[5133]: I1121 13:46:58.175846 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:46:58 crc kubenswrapper[5133]: I1121 13:46:58.234362 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:46:58 crc kubenswrapper[5133]: I1121 13:46:58.583616 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:46:58 crc kubenswrapper[5133]: I1121 13:46:58.583700 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:46:58 crc kubenswrapper[5133]: I1121 13:46:58.643253 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:46:59 crc kubenswrapper[5133]: I1121 13:46:59.165208 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2z48l" event={"ID":"a76889ae-3a7d-4d58-a381-9b8082db6f4a","Type":"ContainerStarted","Data":"dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1"} Nov 21 13:46:59 crc kubenswrapper[5133]: I1121 13:46:59.233799 5133 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:46:59 crc kubenswrapper[5133]: I1121 13:46:59.233908 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:47:00 crc kubenswrapper[5133]: I1121 13:47:00.368218 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:47:00 crc kubenswrapper[5133]: I1121 13:47:00.368741 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:47:00 crc kubenswrapper[5133]: I1121 13:47:00.435420 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:47:01 crc kubenswrapper[5133]: I1121 13:47:01.182402 5133 generic.go:334] "Generic (PLEG): container finished" podID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerID="dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1" exitCode=0 Nov 21 13:47:01 crc kubenswrapper[5133]: I1121 13:47:01.182587 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2z48l" event={"ID":"a76889ae-3a7d-4d58-a381-9b8082db6f4a","Type":"ContainerDied","Data":"dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1"} Nov 21 13:47:01 crc kubenswrapper[5133]: I1121 13:47:01.256189 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:47:03 crc kubenswrapper[5133]: I1121 13:47:03.380859 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b9rk5"] Nov 21 13:47:03 crc kubenswrapper[5133]: I1121 13:47:03.381881 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-b9rk5" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" containerName="registry-server" containerID="cri-o://91035a622d42c138a9d506a786c79938cbc750b3a3455d468d31dbe0d6659caa" gracePeriod=2 Nov 21 13:47:04 crc kubenswrapper[5133]: I1121 13:47:04.378844 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbj2q"] Nov 21 13:47:04 crc kubenswrapper[5133]: I1121 13:47:04.392287 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bbj2q" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerName="registry-server" containerID="cri-o://42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb" gracePeriod=2 Nov 21 13:47:06 crc kubenswrapper[5133]: I1121 13:47:06.223946 5133 generic.go:334] "Generic (PLEG): container finished" podID="94664536-aa38-4b5b-b7b2-fa28904da365" containerID="91035a622d42c138a9d506a786c79938cbc750b3a3455d468d31dbe0d6659caa" exitCode=0 Nov 21 13:47:06 crc kubenswrapper[5133]: I1121 13:47:06.224027 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b9rk5" event={"ID":"94664536-aa38-4b5b-b7b2-fa28904da365","Type":"ContainerDied","Data":"91035a622d42c138a9d506a786c79938cbc750b3a3455d468d31dbe0d6659caa"} Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.151869 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.243402 5133 generic.go:334] "Generic (PLEG): container finished" podID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerID="42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb" exitCode=0 Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.243493 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbj2q" event={"ID":"c292c11b-bba4-40c5-a591-ba3f896e610e","Type":"ContainerDied","Data":"42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb"} Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.248382 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b9rk5" event={"ID":"94664536-aa38-4b5b-b7b2-fa28904da365","Type":"ContainerDied","Data":"760cdb55798d0e8262649674a27295c294922e72f7f4622436ee349b1f4f917c"} Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.248429 5133 scope.go:117] "RemoveContainer" containerID="91035a622d42c138a9d506a786c79938cbc750b3a3455d468d31dbe0d6659caa" Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.248520 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b9rk5" Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.309370 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-catalog-content\") pod \"94664536-aa38-4b5b-b7b2-fa28904da365\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.309421 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jjqm\" (UniqueName: \"kubernetes.io/projected/94664536-aa38-4b5b-b7b2-fa28904da365-kube-api-access-4jjqm\") pod \"94664536-aa38-4b5b-b7b2-fa28904da365\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.309446 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-utilities\") pod \"94664536-aa38-4b5b-b7b2-fa28904da365\" (UID: \"94664536-aa38-4b5b-b7b2-fa28904da365\") " Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.310739 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-utilities" (OuterVolumeSpecName: "utilities") pod "94664536-aa38-4b5b-b7b2-fa28904da365" (UID: "94664536-aa38-4b5b-b7b2-fa28904da365"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.326836 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94664536-aa38-4b5b-b7b2-fa28904da365-kube-api-access-4jjqm" (OuterVolumeSpecName: "kube-api-access-4jjqm") pod "94664536-aa38-4b5b-b7b2-fa28904da365" (UID: "94664536-aa38-4b5b-b7b2-fa28904da365"). InnerVolumeSpecName "kube-api-access-4jjqm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.361143 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94664536-aa38-4b5b-b7b2-fa28904da365" (UID: "94664536-aa38-4b5b-b7b2-fa28904da365"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.410937 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.410992 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jjqm\" (UniqueName: \"kubernetes.io/projected/94664536-aa38-4b5b-b7b2-fa28904da365-kube-api-access-4jjqm\") on node \"crc\" DevicePath \"\"" Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.411026 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94664536-aa38-4b5b-b7b2-fa28904da365-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.575524 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-b9rk5"] Nov 21 13:47:08 crc kubenswrapper[5133]: I1121 13:47:08.582735 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-b9rk5"] Nov 21 13:47:09 crc kubenswrapper[5133]: I1121 13:47:09.648335 5133 scope.go:117] "RemoveContainer" containerID="b41c2568cdc3bb24b2d7e89bc42e4d2a614dc4bbedaff8c99ec5bbb8a92535e5" Nov 21 13:47:10 crc kubenswrapper[5133]: E1121 13:47:10.369303 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb is running failed: container process not found" containerID="42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb" cmd=["grpc_health_probe","-addr=:50051"] Nov 21 13:47:10 crc kubenswrapper[5133]: E1121 13:47:10.369967 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb is running failed: container process not found" containerID="42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb" cmd=["grpc_health_probe","-addr=:50051"] Nov 21 13:47:10 crc kubenswrapper[5133]: E1121 13:47:10.370841 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb is running failed: container process not found" containerID="42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb" cmd=["grpc_health_probe","-addr=:50051"] Nov 21 13:47:10 crc kubenswrapper[5133]: E1121 13:47:10.370889 5133 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb is running failed: container process not found" probeType="Readiness" 
pod="openshift-marketplace/redhat-marketplace-bbj2q" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerName="registry-server" Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.465956 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" path="/var/lib/kubelet/pods/94664536-aa38-4b5b-b7b2-fa28904da365/volumes" Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.632149 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.659209 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjwfx\" (UniqueName: \"kubernetes.io/projected/c292c11b-bba4-40c5-a591-ba3f896e610e-kube-api-access-bjwfx\") pod \"c292c11b-bba4-40c5-a591-ba3f896e610e\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.659514 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-utilities\") pod \"c292c11b-bba4-40c5-a591-ba3f896e610e\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.661327 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-utilities" (OuterVolumeSpecName: "utilities") pod "c292c11b-bba4-40c5-a591-ba3f896e610e" (UID: "c292c11b-bba4-40c5-a591-ba3f896e610e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.670346 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c292c11b-bba4-40c5-a591-ba3f896e610e-kube-api-access-bjwfx" (OuterVolumeSpecName: "kube-api-access-bjwfx") pod "c292c11b-bba4-40c5-a591-ba3f896e610e" (UID: "c292c11b-bba4-40c5-a591-ba3f896e610e"). InnerVolumeSpecName "kube-api-access-bjwfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.760934 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-catalog-content\") pod \"c292c11b-bba4-40c5-a591-ba3f896e610e\" (UID: \"c292c11b-bba4-40c5-a591-ba3f896e610e\") " Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.761193 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjwfx\" (UniqueName: \"kubernetes.io/projected/c292c11b-bba4-40c5-a591-ba3f896e610e-kube-api-access-bjwfx\") on node \"crc\" DevicePath \"\"" Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.761209 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.782662 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c292c11b-bba4-40c5-a591-ba3f896e610e" (UID: "c292c11b-bba4-40c5-a591-ba3f896e610e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:47:10 crc kubenswrapper[5133]: I1121 13:47:10.862723 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c292c11b-bba4-40c5-a591-ba3f896e610e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:47:11 crc kubenswrapper[5133]: I1121 13:47:11.300660 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbj2q" event={"ID":"c292c11b-bba4-40c5-a591-ba3f896e610e","Type":"ContainerDied","Data":"00fa097bbe8f78733c1ecc7aaca32c34da2e29dc9ca45f7e2cb2f5c28ef3ff5f"} Nov 21 13:47:11 crc kubenswrapper[5133]: I1121 13:47:11.300854 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbj2q" Nov 21 13:47:11 crc kubenswrapper[5133]: I1121 13:47:11.353340 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbj2q"] Nov 21 13:47:11 crc kubenswrapper[5133]: I1121 13:47:11.360392 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbj2q"] Nov 21 13:47:11 crc kubenswrapper[5133]: I1121 13:47:11.914874 5133 scope.go:117] "RemoveContainer" containerID="e0c05cab8dbd1aafc602ade374a9763f608d0b7ed35ebbab2220a8ae06acf2b8" Nov 21 13:47:11 crc kubenswrapper[5133]: I1121 13:47:11.971157 5133 scope.go:117] "RemoveContainer" containerID="42aec161f28c3155b244e6d23d5396d1122eaa1a12aaa2697f1d62eafb836dbb" Nov 21 13:47:12 crc kubenswrapper[5133]: I1121 13:47:12.033228 5133 scope.go:117] "RemoveContainer" containerID="7fc9eaed7c2d55bcb144595c8736c6bd3b3541f9274989efa22ace1c794d99d2" Nov 21 13:47:12 crc kubenswrapper[5133]: I1121 13:47:12.058296 5133 scope.go:117] "RemoveContainer" containerID="07f5f887ec4ae8b3d3a9aa3943dd23d751b95276b5c4c460cfad8c2958341242" Nov 21 13:47:12 crc kubenswrapper[5133]: I1121 13:47:12.470688 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" path="/var/lib/kubelet/pods/c292c11b-bba4-40c5-a591-ba3f896e610e/volumes" Nov 21 13:47:13 crc kubenswrapper[5133]: I1121 13:47:13.330564 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wt7r9" event={"ID":"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc","Type":"ContainerStarted","Data":"62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f"} Nov 21 13:47:13 crc kubenswrapper[5133]: I1121 13:47:13.353694 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wt7r9" podStartSLOduration=4.673702727 podStartE2EDuration="2m2.353668319s" podCreationTimestamp="2025-11-21 13:45:11 +0000 UTC" firstStartedPulling="2025-11-21 13:45:14.23491384 +0000 UTC m=+174.032746088" lastFinishedPulling="2025-11-21 13:47:11.914879392 +0000 UTC m=+291.712711680" observedRunningTime="2025-11-21 13:47:13.349373817 +0000 UTC m=+293.147206105" watchObservedRunningTime="2025-11-21 13:47:13.353668319 +0000 UTC m=+293.151500567" Nov 21 13:47:21 crc kubenswrapper[5133]: I1121 13:47:21.362659 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:47:21 crc kubenswrapper[5133]: I1121 13:47:21.363508 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:47:21 crc kubenswrapper[5133]: I1121 13:47:21.390080 5133 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2z48l" event={"ID":"a76889ae-3a7d-4d58-a381-9b8082db6f4a","Type":"ContainerStarted","Data":"5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc"} Nov 21 13:47:21 crc kubenswrapper[5133]: I1121 13:47:21.417042 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:47:21 crc kubenswrapper[5133]: I1121 13:47:21.483528 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:47:21 crc kubenswrapper[5133]: I1121 13:47:21.679089 5133 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Nov 21 13:47:22 crc kubenswrapper[5133]: I1121 13:47:22.434678 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2z48l" podStartSLOduration=6.840138873 podStartE2EDuration="2m11.434658322s" podCreationTimestamp="2025-11-21 13:45:11 +0000 UTC" firstStartedPulling="2025-11-21 13:45:14.234642183 +0000 UTC m=+174.032474431" lastFinishedPulling="2025-11-21 13:47:18.829161622 +0000 UTC m=+298.626993880" observedRunningTime="2025-11-21 13:47:22.430879504 +0000 UTC m=+302.228711812" watchObservedRunningTime="2025-11-21 13:47:22.434658322 +0000 UTC m=+302.232490580" Nov 21 13:47:31 crc kubenswrapper[5133]: I1121 13:47:31.811675 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:47:31 crc kubenswrapper[5133]: I1121 13:47:31.812576 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:47:31 crc kubenswrapper[5133]: I1121 13:47:31.870550 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:47:32 crc kubenswrapper[5133]: I1121 13:47:32.527013 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:47:32 crc kubenswrapper[5133]: I1121 13:47:32.584180 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2z48l"] Nov 21 13:47:34 crc kubenswrapper[5133]: I1121 13:47:34.483338 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2z48l" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerName="registry-server" containerID="cri-o://5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc" gracePeriod=2 Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.376821 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.490490 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-utilities\") pod \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.490567 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlrsx\" (UniqueName: \"kubernetes.io/projected/a76889ae-3a7d-4d58-a381-9b8082db6f4a-kube-api-access-rlrsx\") pod \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.490675 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-catalog-content\") pod \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\" (UID: \"a76889ae-3a7d-4d58-a381-9b8082db6f4a\") " Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.492228 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-utilities" (OuterVolumeSpecName: "utilities") pod "a76889ae-3a7d-4d58-a381-9b8082db6f4a" (UID: "a76889ae-3a7d-4d58-a381-9b8082db6f4a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.495687 5133 generic.go:334] "Generic (PLEG): container finished" podID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerID="5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc" exitCode=0 Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.495875 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2z48l" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.495889 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2z48l" event={"ID":"a76889ae-3a7d-4d58-a381-9b8082db6f4a","Type":"ContainerDied","Data":"5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc"} Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.496168 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2z48l" event={"ID":"a76889ae-3a7d-4d58-a381-9b8082db6f4a","Type":"ContainerDied","Data":"216bec72ce3f62e2d26b14e9683324f4dabc6f446780687291f76180e306a6d4"} Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.496259 5133 scope.go:117] "RemoveContainer" containerID="5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.500838 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a76889ae-3a7d-4d58-a381-9b8082db6f4a-kube-api-access-rlrsx" (OuterVolumeSpecName: "kube-api-access-rlrsx") pod "a76889ae-3a7d-4d58-a381-9b8082db6f4a" (UID: "a76889ae-3a7d-4d58-a381-9b8082db6f4a"). InnerVolumeSpecName "kube-api-access-rlrsx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.542769 5133 scope.go:117] "RemoveContainer" containerID="dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.560100 5133 scope.go:117] "RemoveContainer" containerID="8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.577395 5133 scope.go:117] "RemoveContainer" containerID="5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc" Nov 21 13:47:35 crc kubenswrapper[5133]: E1121 13:47:35.578106 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc\": container with ID starting with 5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc not found: ID does not exist" containerID="5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.578161 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc"} err="failed to get container status \"5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc\": rpc error: code = NotFound desc = could not find container \"5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc\": container with ID starting with 5928248cba0e5a985c48386da8d37edab119d2774b744b092fb0a949f39922bc not found: ID does not exist" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.578192 5133 scope.go:117] "RemoveContainer" containerID="dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1" Nov 21 13:47:35 crc kubenswrapper[5133]: E1121 13:47:35.578623 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1\": container with ID starting with dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1 not found: ID does not exist" containerID="dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.578661 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1"} err="failed to get container status \"dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1\": rpc error: code = NotFound desc = could not find container \"dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1\": container with ID starting with dce873e0f0b1e6a4b71cce074dd3a9256271d87c5bf48bb436fb23f0c39ee1b1 not found: ID does not exist" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.578690 5133 scope.go:117] "RemoveContainer" containerID="8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0" Nov 21 13:47:35 crc kubenswrapper[5133]: E1121 13:47:35.579067 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0\": container with ID starting with 8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0 not found: ID does not exist" containerID="8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0" Nov 21 13:47:35 crc 
kubenswrapper[5133]: I1121 13:47:35.579122 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0"} err="failed to get container status \"8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0\": rpc error: code = NotFound desc = could not find container \"8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0\": container with ID starting with 8206f0a9d13477518b2b6cf7d9c11bc93a63186af31a20180f1c841d61debac0 not found: ID does not exist" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.591933 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.591961 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlrsx\" (UniqueName: \"kubernetes.io/projected/a76889ae-3a7d-4d58-a381-9b8082db6f4a-kube-api-access-rlrsx\") on node \"crc\" DevicePath \"\"" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.600723 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a76889ae-3a7d-4d58-a381-9b8082db6f4a" (UID: "a76889ae-3a7d-4d58-a381-9b8082db6f4a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.693783 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a76889ae-3a7d-4d58-a381-9b8082db6f4a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.827346 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2z48l"] Nov 21 13:47:35 crc kubenswrapper[5133]: I1121 13:47:35.831850 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2z48l"] Nov 21 13:47:36 crc kubenswrapper[5133]: I1121 13:47:36.478498 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" path="/var/lib/kubelet/pods/a76889ae-3a7d-4d58-a381-9b8082db6f4a/volumes" Nov 21 13:48:53 crc kubenswrapper[5133]: I1121 13:48:53.311200 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:48:53 crc kubenswrapper[5133]: I1121 13:48:53.312103 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.166037 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xp47m"] Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.167141 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-xp47m" 
podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerName="registry-server" containerID="cri-o://32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4" gracePeriod=30 Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.176241 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rfbh2"] Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.176691 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rfbh2" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerName="registry-server" containerID="cri-o://2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a" gracePeriod=30 Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.199263 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zd8tq"] Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.199551 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" podUID="8bbf68aa-448e-453c-8df2-839594103920" containerName="marketplace-operator" containerID="cri-o://c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e" gracePeriod=30 Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.208364 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pm4gn"] Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.208817 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pm4gn" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerName="registry-server" containerID="cri-o://e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8" gracePeriod=30 Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.227527 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wt7r9"] Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.227923 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wt7r9" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerName="registry-server" containerID="cri-o://62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f" gracePeriod=30 Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.230716 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qp85d"] Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.230957 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerName="extract-content" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.230971 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerName="extract-content" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.230982 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.230988 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231017 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" 
containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231024 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231031 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" containerName="extract-utilities" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231037 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" containerName="extract-utilities" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231045 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a752751-f0e9-4016-aeea-ca2df83e2981" containerName="pruner" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231051 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a752751-f0e9-4016-aeea-ca2df83e2981" containerName="pruner" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231063 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerName="extract-utilities" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231069 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerName="extract-utilities" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231077 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" containerName="extract-content" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231083 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" containerName="extract-content" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231091 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231097 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231108 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerName="extract-content" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231114 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerName="extract-content" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231122 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231127 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231138 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerName="extract-content" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231144 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerName="extract-content" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231153 5133 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerName="extract-utilities" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231159 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerName="extract-utilities" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231167 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerName="extract-utilities" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231174 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerName="extract-utilities" Nov 21 13:48:55 crc kubenswrapper[5133]: E1121 13:48:55.231184 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c33695a-1523-448d-990b-3b16e8902b8a" containerName="pruner" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231189 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c33695a-1523-448d-990b-3b16e8902b8a" containerName="pruner" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231284 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="94664536-aa38-4b5b-b7b2-fa28904da365" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231294 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a76889ae-3a7d-4d58-a381-9b8082db6f4a" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231304 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="297f6c8b-efd5-4df1-901e-d66882a776c5" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231312 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c33695a-1523-448d-990b-3b16e8902b8a" containerName="pruner" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231322 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a752751-f0e9-4016-aeea-ca2df83e2981" containerName="pruner" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231333 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c292c11b-bba4-40c5-a591-ba3f896e610e" containerName="registry-server" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.231808 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.242520 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qp85d"] Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.300608 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28xgr\" (UniqueName: \"kubernetes.io/projected/22623623-0726-4900-899d-c2d0e34a9562-kube-api-access-28xgr\") pod \"marketplace-operator-79b997595-qp85d\" (UID: \"22623623-0726-4900-899d-c2d0e34a9562\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.300678 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/22623623-0726-4900-899d-c2d0e34a9562-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qp85d\" (UID: \"22623623-0726-4900-899d-c2d0e34a9562\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.300729 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/22623623-0726-4900-899d-c2d0e34a9562-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qp85d\" (UID: \"22623623-0726-4900-899d-c2d0e34a9562\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.403589 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28xgr\" (UniqueName: \"kubernetes.io/projected/22623623-0726-4900-899d-c2d0e34a9562-kube-api-access-28xgr\") pod \"marketplace-operator-79b997595-qp85d\" (UID: \"22623623-0726-4900-899d-c2d0e34a9562\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.403688 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/22623623-0726-4900-899d-c2d0e34a9562-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qp85d\" (UID: \"22623623-0726-4900-899d-c2d0e34a9562\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.403772 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/22623623-0726-4900-899d-c2d0e34a9562-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qp85d\" (UID: \"22623623-0726-4900-899d-c2d0e34a9562\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.406327 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/22623623-0726-4900-899d-c2d0e34a9562-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qp85d\" (UID: \"22623623-0726-4900-899d-c2d0e34a9562\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.415464 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/22623623-0726-4900-899d-c2d0e34a9562-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qp85d\" (UID: \"22623623-0726-4900-899d-c2d0e34a9562\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.428113 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28xgr\" (UniqueName: \"kubernetes.io/projected/22623623-0726-4900-899d-c2d0e34a9562-kube-api-access-28xgr\") pod \"marketplace-operator-79b997595-qp85d\" (UID: \"22623623-0726-4900-899d-c2d0e34a9562\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.736384 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.738733 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.743187 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.748650 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.757839 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.761055 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.809659 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cs8c\" (UniqueName: \"kubernetes.io/projected/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-kube-api-access-4cs8c\") pod \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.809934 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-catalog-content\") pod \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810009 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsp84\" (UniqueName: \"kubernetes.io/projected/8bbf68aa-448e-453c-8df2-839594103920-kube-api-access-gsp84\") pod \"8bbf68aa-448e-453c-8df2-839594103920\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810060 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-catalog-content\") pod \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810081 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8bbf68aa-448e-453c-8df2-839594103920-marketplace-trusted-ca\") pod \"8bbf68aa-448e-453c-8df2-839594103920\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810111 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-utilities\") pod \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810134 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c945m\" (UniqueName: \"kubernetes.io/projected/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-kube-api-access-c945m\") pod \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810158 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ffjm\" (UniqueName: \"kubernetes.io/projected/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-kube-api-access-5ffjm\") pod \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810207 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-utilities\") pod \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810239 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-utilities\") pod \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\" (UID: \"8997b5b2-53b0-4b74-92db-7e82f3b44ea2\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810263 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-catalog-content\") pod \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810283 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glkqz\" (UniqueName: \"kubernetes.io/projected/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-kube-api-access-glkqz\") pod \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\" (UID: \"ce3b64c0-a2cb-416d-9c9a-14c10514bfac\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810316 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-catalog-content\") pod \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\" (UID: \"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.810944 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bbf68aa-448e-453c-8df2-839594103920-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "8bbf68aa-448e-453c-8df2-839594103920" (UID: "8bbf68aa-448e-453c-8df2-839594103920"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.815667 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-utilities" (OuterVolumeSpecName: "utilities") pod "ce3b64c0-a2cb-416d-9c9a-14c10514bfac" (UID: "ce3b64c0-a2cb-416d-9c9a-14c10514bfac"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.816546 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-utilities" (OuterVolumeSpecName: "utilities") pod "8997b5b2-53b0-4b74-92db-7e82f3b44ea2" (UID: "8997b5b2-53b0-4b74-92db-7e82f3b44ea2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.818711 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-utilities" (OuterVolumeSpecName: "utilities") pod "cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" (UID: "cb05a95a-9c39-4c50-b64b-91d20d5d5ebc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.825901 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-kube-api-access-5ffjm" (OuterVolumeSpecName: "kube-api-access-5ffjm") pod "a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" (UID: "a6b4a203-3329-4e08-b2d6-61ec7ee18e7e"). InnerVolumeSpecName "kube-api-access-5ffjm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.825953 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-kube-api-access-glkqz" (OuterVolumeSpecName: "kube-api-access-glkqz") pod "ce3b64c0-a2cb-416d-9c9a-14c10514bfac" (UID: "ce3b64c0-a2cb-416d-9c9a-14c10514bfac"). InnerVolumeSpecName "kube-api-access-glkqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.826072 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-kube-api-access-4cs8c" (OuterVolumeSpecName: "kube-api-access-4cs8c") pod "cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" (UID: "cb05a95a-9c39-4c50-b64b-91d20d5d5ebc"). InnerVolumeSpecName "kube-api-access-4cs8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.826172 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bbf68aa-448e-453c-8df2-839594103920-kube-api-access-gsp84" (OuterVolumeSpecName: "kube-api-access-gsp84") pod "8bbf68aa-448e-453c-8df2-839594103920" (UID: "8bbf68aa-448e-453c-8df2-839594103920"). InnerVolumeSpecName "kube-api-access-gsp84". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.853180 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-kube-api-access-c945m" (OuterVolumeSpecName: "kube-api-access-c945m") pod "8997b5b2-53b0-4b74-92db-7e82f3b44ea2" (UID: "8997b5b2-53b0-4b74-92db-7e82f3b44ea2"). InnerVolumeSpecName "kube-api-access-c945m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.862852 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8997b5b2-53b0-4b74-92db-7e82f3b44ea2" (UID: "8997b5b2-53b0-4b74-92db-7e82f3b44ea2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.901818 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ce3b64c0-a2cb-416d-9c9a-14c10514bfac" (UID: "ce3b64c0-a2cb-416d-9c9a-14c10514bfac"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.911868 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8bbf68aa-448e-453c-8df2-839594103920-marketplace-operator-metrics\") pod \"8bbf68aa-448e-453c-8df2-839594103920\" (UID: \"8bbf68aa-448e-453c-8df2-839594103920\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.911921 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-utilities\") pod \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\" (UID: \"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e\") " Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912404 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cs8c\" (UniqueName: \"kubernetes.io/projected/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-kube-api-access-4cs8c\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912439 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsp84\" (UniqueName: \"kubernetes.io/projected/8bbf68aa-448e-453c-8df2-839594103920-kube-api-access-gsp84\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912455 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912472 5133 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8bbf68aa-448e-453c-8df2-839594103920-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912485 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912500 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c945m\" (UniqueName: \"kubernetes.io/projected/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-kube-api-access-c945m\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912514 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ffjm\" (UniqueName: \"kubernetes.io/projected/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-kube-api-access-5ffjm\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912527 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912540 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8997b5b2-53b0-4b74-92db-7e82f3b44ea2-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.912553 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 
13:48:55.912568 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glkqz\" (UniqueName: \"kubernetes.io/projected/ce3b64c0-a2cb-416d-9c9a-14c10514bfac-kube-api-access-glkqz\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.913514 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-utilities" (OuterVolumeSpecName: "utilities") pod "a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" (UID: "a6b4a203-3329-4e08-b2d6-61ec7ee18e7e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.916834 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bbf68aa-448e-453c-8df2-839594103920-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "8bbf68aa-448e-453c-8df2-839594103920" (UID: "8bbf68aa-448e-453c-8df2-839594103920"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.947565 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" (UID: "a6b4a203-3329-4e08-b2d6-61ec7ee18e7e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:48:55 crc kubenswrapper[5133]: I1121 13:48:55.988775 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" (UID: "cb05a95a-9c39-4c50-b64b-91d20d5d5ebc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.013173 5133 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/8bbf68aa-448e-453c-8df2-839594103920-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.013224 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.013234 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.013243 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.040431 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qp85d"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.045117 5133 generic.go:334] "Generic (PLEG): container finished" podID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerID="2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a" exitCode=0 Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.045221 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfbh2" event={"ID":"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e","Type":"ContainerDied","Data":"2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.045256 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfbh2" event={"ID":"a6b4a203-3329-4e08-b2d6-61ec7ee18e7e","Type":"ContainerDied","Data":"1e7ad4a4201a2c990212b92cd302903baedf0fd343876175bbd45218bbb30012"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.045269 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rfbh2" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.045280 5133 scope.go:117] "RemoveContainer" containerID="2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.049574 5133 generic.go:334] "Generic (PLEG): container finished" podID="8bbf68aa-448e-453c-8df2-839594103920" containerID="c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e" exitCode=0 Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.049685 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" event={"ID":"8bbf68aa-448e-453c-8df2-839594103920","Type":"ContainerDied","Data":"c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.049711 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" event={"ID":"8bbf68aa-448e-453c-8df2-839594103920","Type":"ContainerDied","Data":"a8f12750df1a1069e492d316ba24f34d000032db0ad07a007d0f7c5fb4648cc2"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.049742 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-zd8tq" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.055262 5133 generic.go:334] "Generic (PLEG): container finished" podID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerID="e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8" exitCode=0 Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.055341 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pm4gn" event={"ID":"8997b5b2-53b0-4b74-92db-7e82f3b44ea2","Type":"ContainerDied","Data":"e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.055371 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pm4gn" event={"ID":"8997b5b2-53b0-4b74-92db-7e82f3b44ea2","Type":"ContainerDied","Data":"d1acf04170539ac6265d1fc19c187e7a7ed3570c7c142327fcb9b28c6811b3c7"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.055505 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pm4gn" Nov 21 13:48:56 crc kubenswrapper[5133]: W1121 13:48:56.058500 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod22623623_0726_4900_899d_c2d0e34a9562.slice/crio-04f7fe4a10759244f82c8149cc878f96e36865d858d923eb64c7e6f28f3c3a64 WatchSource:0}: Error finding container 04f7fe4a10759244f82c8149cc878f96e36865d858d923eb64c7e6f28f3c3a64: Status 404 returned error can't find the container with id 04f7fe4a10759244f82c8149cc878f96e36865d858d923eb64c7e6f28f3c3a64 Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.063018 5133 generic.go:334] "Generic (PLEG): container finished" podID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerID="32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4" exitCode=0 Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.063104 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp47m" event={"ID":"ce3b64c0-a2cb-416d-9c9a-14c10514bfac","Type":"ContainerDied","Data":"32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.063141 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xp47m" event={"ID":"ce3b64c0-a2cb-416d-9c9a-14c10514bfac","Type":"ContainerDied","Data":"e4b1bfce0f7ba5881296f773324ad77e0b8f7005f67b091174a3aa1019e7eec8"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.063708 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-xp47m" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.076926 5133 generic.go:334] "Generic (PLEG): container finished" podID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerID="62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f" exitCode=0 Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.076990 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wt7r9" event={"ID":"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc","Type":"ContainerDied","Data":"62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.077048 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wt7r9" event={"ID":"cb05a95a-9c39-4c50-b64b-91d20d5d5ebc","Type":"ContainerDied","Data":"a454ff51dfa0c2b9c281e28a28a5e5d2bc608efcf607ce20f6d99021a0db6331"} Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.077133 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wt7r9" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.092619 5133 scope.go:117] "RemoveContainer" containerID="52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.101173 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rfbh2"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.128053 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rfbh2"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.134039 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zd8tq"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.138325 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-zd8tq"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.147978 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wt7r9"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.154965 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wt7r9"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.156973 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pm4gn"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.161828 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pm4gn"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.167709 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-xp47m"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.175566 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-xp47m"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.178010 5133 scope.go:117] "RemoveContainer" containerID="cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.208395 5133 scope.go:117] "RemoveContainer" containerID="2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.209078 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a\": container with ID starting with 2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a not found: ID does not exist" containerID="2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.209123 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a"} err="failed to get container status \"2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a\": rpc error: code = NotFound desc = could not find container \"2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a\": container with ID starting with 2dfe1c6ac873af7082b6127a19dc85483e793fb345768fb8ef355c448475f62a not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.209149 5133 scope.go:117] "RemoveContainer" 
containerID="52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.209529 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0\": container with ID starting with 52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0 not found: ID does not exist" containerID="52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.209551 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0"} err="failed to get container status \"52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0\": rpc error: code = NotFound desc = could not find container \"52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0\": container with ID starting with 52875960dd69a741670e6b5f81662dd6644dd707a21d4f9aa8d548fe2c8b2fc0 not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.209564 5133 scope.go:117] "RemoveContainer" containerID="cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.209858 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61\": container with ID starting with cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61 not found: ID does not exist" containerID="cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.209878 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61"} err="failed to get container status \"cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61\": rpc error: code = NotFound desc = could not find container \"cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61\": container with ID starting with cb60a5b355f445f12a76ba7feb5c9d9318eac5889dd32532073329c0198bcb61 not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.209899 5133 scope.go:117] "RemoveContainer" containerID="c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.230971 5133 scope.go:117] "RemoveContainer" containerID="c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.231739 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e\": container with ID starting with c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e not found: ID does not exist" containerID="c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.231774 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e"} err="failed to get container status \"c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e\": rpc error: code = 
NotFound desc = could not find container \"c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e\": container with ID starting with c2b164d932ba8e7622063737dc8b40874979e3707cbda6cc12bb8bb02d12e74e not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.231799 5133 scope.go:117] "RemoveContainer" containerID="e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.250928 5133 scope.go:117] "RemoveContainer" containerID="cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.267651 5133 scope.go:117] "RemoveContainer" containerID="4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.282795 5133 scope.go:117] "RemoveContainer" containerID="e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.283397 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8\": container with ID starting with e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8 not found: ID does not exist" containerID="e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.283472 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8"} err="failed to get container status \"e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8\": rpc error: code = NotFound desc = could not find container \"e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8\": container with ID starting with e672f71dd43f6c5fd47f09c5cddcf56f06f7047dab6afc91679d56327ffb0be8 not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.283512 5133 scope.go:117] "RemoveContainer" containerID="cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.283912 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50\": container with ID starting with cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50 not found: ID does not exist" containerID="cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.283951 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50"} err="failed to get container status \"cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50\": rpc error: code = NotFound desc = could not find container \"cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50\": container with ID starting with cd3c62a381ab2d6afa736d8e1259a5c23aa8ed6f53829a80439e95f2d5bcfd50 not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.283981 5133 scope.go:117] "RemoveContainer" containerID="4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.284278 5133 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67\": container with ID starting with 4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67 not found: ID does not exist" containerID="4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.284309 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67"} err="failed to get container status \"4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67\": rpc error: code = NotFound desc = could not find container \"4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67\": container with ID starting with 4ecd3d7eaa3de8888e1fa0a113fc65423969ce9e1d442a0c4456714aab37ea67 not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.284324 5133 scope.go:117] "RemoveContainer" containerID="32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.302269 5133 scope.go:117] "RemoveContainer" containerID="86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.389716 5133 scope.go:117] "RemoveContainer" containerID="6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.410675 5133 scope.go:117] "RemoveContainer" containerID="32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.411268 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4\": container with ID starting with 32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4 not found: ID does not exist" containerID="32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.411319 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4"} err="failed to get container status \"32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4\": rpc error: code = NotFound desc = could not find container \"32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4\": container with ID starting with 32ae11dc70213e101059c11f64fdf08b022c007b12cb3704ae30d903322b3dc4 not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.411359 5133 scope.go:117] "RemoveContainer" containerID="86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.411678 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc\": container with ID starting with 86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc not found: ID does not exist" containerID="86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.411712 5133 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc"} err="failed to get container status \"86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc\": rpc error: code = NotFound desc = could not find container \"86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc\": container with ID starting with 86431023d5b4b836ae566d1d7b4e5681337574f42d6f5dbed6bd8aded0e355dc not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.411733 5133 scope.go:117] "RemoveContainer" containerID="6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.412017 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6\": container with ID starting with 6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6 not found: ID does not exist" containerID="6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.412047 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6"} err="failed to get container status \"6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6\": rpc error: code = NotFound desc = could not find container \"6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6\": container with ID starting with 6f070e81de872627f154e47d87db194dec24af109e6461993acf644b41db38b6 not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.412071 5133 scope.go:117] "RemoveContainer" containerID="62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.431536 5133 scope.go:117] "RemoveContainer" containerID="a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.446909 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ggrbw"] Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.461766 5133 scope.go:117] "RemoveContainer" containerID="c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.469604 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" path="/var/lib/kubelet/pods/8997b5b2-53b0-4b74-92db-7e82f3b44ea2/volumes" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.470639 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bbf68aa-448e-453c-8df2-839594103920" path="/var/lib/kubelet/pods/8bbf68aa-448e-453c-8df2-839594103920/volumes" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.471254 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" path="/var/lib/kubelet/pods/a6b4a203-3329-4e08-b2d6-61ec7ee18e7e/volumes" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.473939 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" path="/var/lib/kubelet/pods/cb05a95a-9c39-4c50-b64b-91d20d5d5ebc/volumes" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.474842 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" path="/var/lib/kubelet/pods/ce3b64c0-a2cb-416d-9c9a-14c10514bfac/volumes" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.494344 5133 scope.go:117] "RemoveContainer" containerID="62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.500192 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f\": container with ID starting with 62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f not found: ID does not exist" containerID="62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.500246 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f"} err="failed to get container status \"62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f\": rpc error: code = NotFound desc = could not find container \"62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f\": container with ID starting with 62b7930dabff41b19bdcc2b0fcf551112a65aa7cffc489e02560f8c1c7c6913f not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.500286 5133 scope.go:117] "RemoveContainer" containerID="a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.500796 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f\": container with ID starting with a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f not found: ID does not exist" containerID="a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.500886 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f"} err="failed to get container status \"a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f\": rpc error: code = NotFound desc = could not find container \"a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f\": container with ID starting with a767564cb8bd4637e0d7e074663fa0f7a31f01c35b3ffaf202e950bf820c756f not found: ID does not exist" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.500940 5133 scope.go:117] "RemoveContainer" containerID="c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035" Nov 21 13:48:56 crc kubenswrapper[5133]: E1121 13:48:56.501402 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035\": container with ID starting with c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035 not found: ID does not exist" containerID="c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035" Nov 21 13:48:56 crc kubenswrapper[5133]: I1121 13:48:56.501434 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035"} err="failed to get container status 
\"c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035\": rpc error: code = NotFound desc = could not find container \"c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035\": container with ID starting with c74ce0ee8da8a5ef559b51b3c06036145660861895d1177711f1540d0e95d035 not found: ID does not exist" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.096605 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" event={"ID":"22623623-0726-4900-899d-c2d0e34a9562","Type":"ContainerStarted","Data":"5ef582d484db0dd984979392e805118021828825523fb83d343726bed61d3f0d"} Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.096659 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" event={"ID":"22623623-0726-4900-899d-c2d0e34a9562","Type":"ContainerStarted","Data":"04f7fe4a10759244f82c8149cc878f96e36865d858d923eb64c7e6f28f3c3a64"} Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.096961 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.102660 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.124847 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-qp85d" podStartSLOduration=2.124829015 podStartE2EDuration="2.124829015s" podCreationTimestamp="2025-11-21 13:48:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:48:57.122034241 +0000 UTC m=+396.919866509" watchObservedRunningTime="2025-11-21 13:48:57.124829015 +0000 UTC m=+396.922661263" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.386830 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jtgkd"] Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387521 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerName="extract-content" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387541 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerName="extract-content" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387560 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387571 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387588 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bbf68aa-448e-453c-8df2-839594103920" containerName="marketplace-operator" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387599 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bbf68aa-448e-453c-8df2-839594103920" containerName="marketplace-operator" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387618 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" 
containerName="extract-utilities" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387629 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerName="extract-utilities" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387643 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387654 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387669 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerName="extract-utilities" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387679 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerName="extract-utilities" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387690 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerName="extract-content" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387700 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerName="extract-content" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387715 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerName="extract-content" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387726 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerName="extract-content" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387739 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerName="extract-utilities" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387750 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerName="extract-utilities" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387770 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387781 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387795 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerName="extract-utilities" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387803 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerName="extract-utilities" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387812 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerName="extract-content" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387821 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerName="extract-content" Nov 21 13:48:57 crc kubenswrapper[5133]: E1121 13:48:57.387831 5133 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387839 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387965 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6b4a203-3329-4e08-b2d6-61ec7ee18e7e" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.387978 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="8997b5b2-53b0-4b74-92db-7e82f3b44ea2" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.388019 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb05a95a-9c39-4c50-b64b-91d20d5d5ebc" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.388036 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce3b64c0-a2cb-416d-9c9a-14c10514bfac" containerName="registry-server" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.388048 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bbf68aa-448e-453c-8df2-839594103920" containerName="marketplace-operator" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.388986 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.391711 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.406215 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtgkd"] Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.537912 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4-catalog-content\") pod \"redhat-marketplace-jtgkd\" (UID: \"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4\") " pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.537982 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4-utilities\") pod \"redhat-marketplace-jtgkd\" (UID: \"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4\") " pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.538095 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wg8vj\" (UniqueName: \"kubernetes.io/projected/3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4-kube-api-access-wg8vj\") pod \"redhat-marketplace-jtgkd\" (UID: \"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4\") " pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.589352 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7ls26"] Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.590383 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.592467 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.603228 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7ls26"] Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.639316 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wg8vj\" (UniqueName: \"kubernetes.io/projected/3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4-kube-api-access-wg8vj\") pod \"redhat-marketplace-jtgkd\" (UID: \"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4\") " pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.639705 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4-catalog-content\") pod \"redhat-marketplace-jtgkd\" (UID: \"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4\") " pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.639816 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4-utilities\") pod \"redhat-marketplace-jtgkd\" (UID: \"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4\") " pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.640261 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4-catalog-content\") pod \"redhat-marketplace-jtgkd\" (UID: \"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4\") " pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.640664 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4-utilities\") pod \"redhat-marketplace-jtgkd\" (UID: \"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4\") " pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.661639 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wg8vj\" (UniqueName: \"kubernetes.io/projected/3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4-kube-api-access-wg8vj\") pod \"redhat-marketplace-jtgkd\" (UID: \"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4\") " pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.714890 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.741058 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c7ae814-704e-4480-8297-6a5309c94c22-catalog-content\") pod \"redhat-operators-7ls26\" (UID: \"7c7ae814-704e-4480-8297-6a5309c94c22\") " pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.741133 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c7ae814-704e-4480-8297-6a5309c94c22-utilities\") pod \"redhat-operators-7ls26\" (UID: \"7c7ae814-704e-4480-8297-6a5309c94c22\") " pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.741165 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srlc2\" (UniqueName: \"kubernetes.io/projected/7c7ae814-704e-4480-8297-6a5309c94c22-kube-api-access-srlc2\") pod \"redhat-operators-7ls26\" (UID: \"7c7ae814-704e-4480-8297-6a5309c94c22\") " pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.842608 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c7ae814-704e-4480-8297-6a5309c94c22-catalog-content\") pod \"redhat-operators-7ls26\" (UID: \"7c7ae814-704e-4480-8297-6a5309c94c22\") " pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.842669 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c7ae814-704e-4480-8297-6a5309c94c22-utilities\") pod \"redhat-operators-7ls26\" (UID: \"7c7ae814-704e-4480-8297-6a5309c94c22\") " pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.842698 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srlc2\" (UniqueName: \"kubernetes.io/projected/7c7ae814-704e-4480-8297-6a5309c94c22-kube-api-access-srlc2\") pod \"redhat-operators-7ls26\" (UID: \"7c7ae814-704e-4480-8297-6a5309c94c22\") " pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.844059 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c7ae814-704e-4480-8297-6a5309c94c22-catalog-content\") pod \"redhat-operators-7ls26\" (UID: \"7c7ae814-704e-4480-8297-6a5309c94c22\") " pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.844343 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c7ae814-704e-4480-8297-6a5309c94c22-utilities\") pod \"redhat-operators-7ls26\" (UID: \"7c7ae814-704e-4480-8297-6a5309c94c22\") " pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.861151 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srlc2\" (UniqueName: \"kubernetes.io/projected/7c7ae814-704e-4480-8297-6a5309c94c22-kube-api-access-srlc2\") pod \"redhat-operators-7ls26\" (UID: 
\"7c7ae814-704e-4480-8297-6a5309c94c22\") " pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.907722 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:48:57 crc kubenswrapper[5133]: I1121 13:48:57.954851 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jtgkd"] Nov 21 13:48:57 crc kubenswrapper[5133]: W1121 13:48:57.960072 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f8cf09c_4d75_475f_b3d9_9f0b892f0bc4.slice/crio-d8bf92e65563b7883cb34f9e6416c881078d50237871c3e55e53b867823ba2fd WatchSource:0}: Error finding container d8bf92e65563b7883cb34f9e6416c881078d50237871c3e55e53b867823ba2fd: Status 404 returned error can't find the container with id d8bf92e65563b7883cb34f9e6416c881078d50237871c3e55e53b867823ba2fd Nov 21 13:48:58 crc kubenswrapper[5133]: I1121 13:48:58.108602 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtgkd" event={"ID":"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4","Type":"ContainerStarted","Data":"d8bf92e65563b7883cb34f9e6416c881078d50237871c3e55e53b867823ba2fd"} Nov 21 13:48:58 crc kubenswrapper[5133]: I1121 13:48:58.135398 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7ls26"] Nov 21 13:48:58 crc kubenswrapper[5133]: W1121 13:48:58.146093 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c7ae814_704e_4480_8297_6a5309c94c22.slice/crio-27a17ef61cd6771d014c7d269f255a2accc9e1fa43993cfe55c06914442b1362 WatchSource:0}: Error finding container 27a17ef61cd6771d014c7d269f255a2accc9e1fa43993cfe55c06914442b1362: Status 404 returned error can't find the container with id 27a17ef61cd6771d014c7d269f255a2accc9e1fa43993cfe55c06914442b1362 Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.118290 5133 generic.go:334] "Generic (PLEG): container finished" podID="3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4" containerID="57523087276ca2d3c6f62430327bba7779dddac409b8f6fde62ff7f156efa2fd" exitCode=0 Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.118365 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtgkd" event={"ID":"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4","Type":"ContainerDied","Data":"57523087276ca2d3c6f62430327bba7779dddac409b8f6fde62ff7f156efa2fd"} Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.121064 5133 generic.go:334] "Generic (PLEG): container finished" podID="7c7ae814-704e-4480-8297-6a5309c94c22" containerID="6f50c127bb6430faaad2bbdffaf47d37c291887fa63a71edabe860e98256050e" exitCode=0 Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.122657 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ls26" event={"ID":"7c7ae814-704e-4480-8297-6a5309c94c22","Type":"ContainerDied","Data":"6f50c127bb6430faaad2bbdffaf47d37c291887fa63a71edabe860e98256050e"} Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.122693 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ls26" event={"ID":"7c7ae814-704e-4480-8297-6a5309c94c22","Type":"ContainerStarted","Data":"27a17ef61cd6771d014c7d269f255a2accc9e1fa43993cfe55c06914442b1362"} Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 
13:48:59.786398 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gwctl"] Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.791599 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.795522 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.805155 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gwctl"] Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.875277 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-utilities\") pod \"community-operators-gwctl\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.875340 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-catalog-content\") pod \"community-operators-gwctl\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.875408 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl6sc\" (UniqueName: \"kubernetes.io/projected/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-kube-api-access-bl6sc\") pod \"community-operators-gwctl\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.976545 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-utilities\") pod \"community-operators-gwctl\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.976602 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-catalog-content\") pod \"community-operators-gwctl\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.976660 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl6sc\" (UniqueName: \"kubernetes.io/projected/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-kube-api-access-bl6sc\") pod \"community-operators-gwctl\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.978521 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-utilities\") pod \"community-operators-gwctl\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 
13:48:59.978768 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-catalog-content\") pod \"community-operators-gwctl\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.988884 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jfgzq"] Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.990094 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:48:59 crc kubenswrapper[5133]: I1121 13:48:59.994350 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.004292 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jfgzq"] Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.008694 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl6sc\" (UniqueName: \"kubernetes.io/projected/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-kube-api-access-bl6sc\") pod \"community-operators-gwctl\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.115080 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.179249 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e13b754-a463-4945-95dd-5bbac447c494-catalog-content\") pod \"certified-operators-jfgzq\" (UID: \"7e13b754-a463-4945-95dd-5bbac447c494\") " pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.179859 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bfp6\" (UniqueName: \"kubernetes.io/projected/7e13b754-a463-4945-95dd-5bbac447c494-kube-api-access-6bfp6\") pod \"certified-operators-jfgzq\" (UID: \"7e13b754-a463-4945-95dd-5bbac447c494\") " pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.179896 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e13b754-a463-4945-95dd-5bbac447c494-utilities\") pod \"certified-operators-jfgzq\" (UID: \"7e13b754-a463-4945-95dd-5bbac447c494\") " pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.281233 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e13b754-a463-4945-95dd-5bbac447c494-catalog-content\") pod \"certified-operators-jfgzq\" (UID: \"7e13b754-a463-4945-95dd-5bbac447c494\") " pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.281307 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bfp6\" (UniqueName: 
\"kubernetes.io/projected/7e13b754-a463-4945-95dd-5bbac447c494-kube-api-access-6bfp6\") pod \"certified-operators-jfgzq\" (UID: \"7e13b754-a463-4945-95dd-5bbac447c494\") " pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.281380 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e13b754-a463-4945-95dd-5bbac447c494-utilities\") pod \"certified-operators-jfgzq\" (UID: \"7e13b754-a463-4945-95dd-5bbac447c494\") " pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.282031 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7e13b754-a463-4945-95dd-5bbac447c494-utilities\") pod \"certified-operators-jfgzq\" (UID: \"7e13b754-a463-4945-95dd-5bbac447c494\") " pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.282122 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7e13b754-a463-4945-95dd-5bbac447c494-catalog-content\") pod \"certified-operators-jfgzq\" (UID: \"7e13b754-a463-4945-95dd-5bbac447c494\") " pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.304597 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bfp6\" (UniqueName: \"kubernetes.io/projected/7e13b754-a463-4945-95dd-5bbac447c494-kube-api-access-6bfp6\") pod \"certified-operators-jfgzq\" (UID: \"7e13b754-a463-4945-95dd-5bbac447c494\") " pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.318457 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.322349 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gwctl"] Nov 21 13:49:00 crc kubenswrapper[5133]: I1121 13:49:00.540855 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jfgzq"] Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.136745 5133 generic.go:334] "Generic (PLEG): container finished" podID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerID="12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575" exitCode=0 Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.136857 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwctl" event={"ID":"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842","Type":"ContainerDied","Data":"12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575"} Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.136903 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwctl" event={"ID":"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842","Type":"ContainerStarted","Data":"dd25a4e4e1f51e0475fb6b1fb44c41216328ba8a0472307e6f8c1e19a4c1f4d4"} Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.139935 5133 generic.go:334] "Generic (PLEG): container finished" podID="3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4" containerID="fef787e410773c3ff62e281bb800a6541df6ab476b7d1a7a28c26c5eb5d66ee6" exitCode=0 Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.140030 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtgkd" event={"ID":"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4","Type":"ContainerDied","Data":"fef787e410773c3ff62e281bb800a6541df6ab476b7d1a7a28c26c5eb5d66ee6"} Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.144988 5133 generic.go:334] "Generic (PLEG): container finished" podID="7c7ae814-704e-4480-8297-6a5309c94c22" containerID="8aebd595a73aedda3f9db84405b72eacbc0110a9c17057e59a2dcb5fe45a2840" exitCode=0 Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.145532 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ls26" event={"ID":"7c7ae814-704e-4480-8297-6a5309c94c22","Type":"ContainerDied","Data":"8aebd595a73aedda3f9db84405b72eacbc0110a9c17057e59a2dcb5fe45a2840"} Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.152294 5133 generic.go:334] "Generic (PLEG): container finished" podID="7e13b754-a463-4945-95dd-5bbac447c494" containerID="3c8304d204bcf128bed6633aac9cecdd7e3da4a45fe3062f1359544094b9d609" exitCode=0 Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.152352 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jfgzq" event={"ID":"7e13b754-a463-4945-95dd-5bbac447c494","Type":"ContainerDied","Data":"3c8304d204bcf128bed6633aac9cecdd7e3da4a45fe3062f1359544094b9d609"} Nov 21 13:49:01 crc kubenswrapper[5133]: I1121 13:49:01.152393 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jfgzq" event={"ID":"7e13b754-a463-4945-95dd-5bbac447c494","Type":"ContainerStarted","Data":"306f56fda6ea768237ac5ce2b948def5bc0b0f85e2e69198890b93692ac9785e"} Nov 21 13:49:02 crc kubenswrapper[5133]: I1121 13:49:02.163452 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7ls26" 
event={"ID":"7c7ae814-704e-4480-8297-6a5309c94c22","Type":"ContainerStarted","Data":"d02bafa0eb7410168e99b8841cd14d9705406d3a0c7715107790d51fa1fe3c9c"} Nov 21 13:49:02 crc kubenswrapper[5133]: I1121 13:49:02.167939 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jtgkd" event={"ID":"3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4","Type":"ContainerStarted","Data":"e4c85ea648ba4213ab1e8cdb7b24bf45bf98f13af3901f280104151e61eee594"} Nov 21 13:49:02 crc kubenswrapper[5133]: I1121 13:49:02.185726 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7ls26" podStartSLOduration=2.7434459049999997 podStartE2EDuration="5.185703664s" podCreationTimestamp="2025-11-21 13:48:57 +0000 UTC" firstStartedPulling="2025-11-21 13:48:59.125289044 +0000 UTC m=+398.923121292" lastFinishedPulling="2025-11-21 13:49:01.567546793 +0000 UTC m=+401.365379051" observedRunningTime="2025-11-21 13:49:02.181549294 +0000 UTC m=+401.979381592" watchObservedRunningTime="2025-11-21 13:49:02.185703664 +0000 UTC m=+401.983535912" Nov 21 13:49:02 crc kubenswrapper[5133]: I1121 13:49:02.213869 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jtgkd" podStartSLOduration=2.34810601 podStartE2EDuration="5.213848371s" podCreationTimestamp="2025-11-21 13:48:57 +0000 UTC" firstStartedPulling="2025-11-21 13:48:59.121775191 +0000 UTC m=+398.919607439" lastFinishedPulling="2025-11-21 13:49:01.987517552 +0000 UTC m=+401.785349800" observedRunningTime="2025-11-21 13:49:02.212501325 +0000 UTC m=+402.010333573" watchObservedRunningTime="2025-11-21 13:49:02.213848371 +0000 UTC m=+402.011680619" Nov 21 13:49:03 crc kubenswrapper[5133]: I1121 13:49:03.176641 5133 generic.go:334] "Generic (PLEG): container finished" podID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerID="9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb" exitCode=0 Nov 21 13:49:03 crc kubenswrapper[5133]: I1121 13:49:03.177045 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwctl" event={"ID":"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842","Type":"ContainerDied","Data":"9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb"} Nov 21 13:49:03 crc kubenswrapper[5133]: I1121 13:49:03.179639 5133 generic.go:334] "Generic (PLEG): container finished" podID="7e13b754-a463-4945-95dd-5bbac447c494" containerID="448227f50812b46d7bb6245794a241aa7063955e91fec8a681a7c93fe22a6eb6" exitCode=0 Nov 21 13:49:03 crc kubenswrapper[5133]: I1121 13:49:03.179699 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jfgzq" event={"ID":"7e13b754-a463-4945-95dd-5bbac447c494","Type":"ContainerDied","Data":"448227f50812b46d7bb6245794a241aa7063955e91fec8a681a7c93fe22a6eb6"} Nov 21 13:49:05 crc kubenswrapper[5133]: I1121 13:49:05.197131 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwctl" event={"ID":"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842","Type":"ContainerStarted","Data":"d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139"} Nov 21 13:49:05 crc kubenswrapper[5133]: I1121 13:49:05.200982 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jfgzq" event={"ID":"7e13b754-a463-4945-95dd-5bbac447c494","Type":"ContainerStarted","Data":"97c76c7cabde21159103ceef6aa762a40b58a23010fc8b615cb5b1d4fee06e29"} Nov 21 
13:49:05 crc kubenswrapper[5133]: I1121 13:49:05.228357 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gwctl" podStartSLOduration=3.7313289899999997 podStartE2EDuration="6.228333402s" podCreationTimestamp="2025-11-21 13:48:59 +0000 UTC" firstStartedPulling="2025-11-21 13:49:01.139690074 +0000 UTC m=+400.937522352" lastFinishedPulling="2025-11-21 13:49:03.636694506 +0000 UTC m=+403.434526764" observedRunningTime="2025-11-21 13:49:05.224086519 +0000 UTC m=+405.021918787" watchObservedRunningTime="2025-11-21 13:49:05.228333402 +0000 UTC m=+405.026165640" Nov 21 13:49:05 crc kubenswrapper[5133]: I1121 13:49:05.249293 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jfgzq" podStartSLOduration=3.8275884060000003 podStartE2EDuration="6.249266838s" podCreationTimestamp="2025-11-21 13:48:59 +0000 UTC" firstStartedPulling="2025-11-21 13:49:01.155196466 +0000 UTC m=+400.953028714" lastFinishedPulling="2025-11-21 13:49:03.576874898 +0000 UTC m=+403.374707146" observedRunningTime="2025-11-21 13:49:05.247652385 +0000 UTC m=+405.045484623" watchObservedRunningTime="2025-11-21 13:49:05.249266838 +0000 UTC m=+405.047099086" Nov 21 13:49:07 crc kubenswrapper[5133]: I1121 13:49:07.715246 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:49:07 crc kubenswrapper[5133]: I1121 13:49:07.715759 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:49:07 crc kubenswrapper[5133]: I1121 13:49:07.769246 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:49:07 crc kubenswrapper[5133]: I1121 13:49:07.908767 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:49:07 crc kubenswrapper[5133]: I1121 13:49:07.909208 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:49:07 crc kubenswrapper[5133]: I1121 13:49:07.959025 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:49:08 crc kubenswrapper[5133]: I1121 13:49:08.268308 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7ls26" Nov 21 13:49:08 crc kubenswrapper[5133]: I1121 13:49:08.277158 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jtgkd" Nov 21 13:49:10 crc kubenswrapper[5133]: I1121 13:49:10.115806 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:49:10 crc kubenswrapper[5133]: I1121 13:49:10.116406 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:49:10 crc kubenswrapper[5133]: I1121 13:49:10.167814 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:49:10 crc kubenswrapper[5133]: I1121 13:49:10.267109 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gwctl" Nov 21 13:49:10 
crc kubenswrapper[5133]: I1121 13:49:10.318889 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:10 crc kubenswrapper[5133]: I1121 13:49:10.318939 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:10 crc kubenswrapper[5133]: I1121 13:49:10.380682 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:11 crc kubenswrapper[5133]: I1121 13:49:11.275661 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jfgzq" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.672712 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kwlzv"] Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.673829 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.702563 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kwlzv"] Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.845663 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-trusted-ca\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.845746 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-bound-sa-token\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.845788 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.845809 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-registry-tls\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.845836 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5wbl\" (UniqueName: \"kubernetes.io/projected/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-kube-api-access-d5wbl\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.845870 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.845907 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-registry-certificates\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.845938 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.874012 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.947361 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.947442 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-trusted-ca\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.947487 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-bound-sa-token\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.947526 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.947560 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-registry-tls\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.947587 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5wbl\" (UniqueName: \"kubernetes.io/projected/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-kube-api-access-d5wbl\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.947641 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-registry-certificates\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.948055 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-ca-trust-extracted\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.948939 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-trusted-ca\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.948948 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-registry-certificates\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.955211 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-registry-tls\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.955209 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-installation-pull-secrets\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.967303 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-bound-sa-token\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc 
kubenswrapper[5133]: I1121 13:49:18.970823 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5wbl\" (UniqueName: \"kubernetes.io/projected/dfa9f606-d53e-4df6-a5ef-ee3a579df07e-kube-api-access-d5wbl\") pod \"image-registry-66df7c8f76-kwlzv\" (UID: \"dfa9f606-d53e-4df6-a5ef-ee3a579df07e\") " pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:18 crc kubenswrapper[5133]: I1121 13:49:18.992266 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:19 crc kubenswrapper[5133]: I1121 13:49:19.217912 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-kwlzv"] Nov 21 13:49:19 crc kubenswrapper[5133]: I1121 13:49:19.281434 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" event={"ID":"dfa9f606-d53e-4df6-a5ef-ee3a579df07e","Type":"ContainerStarted","Data":"a37f1a270675a76b1625b311a50ba690b499deca49a78f640f8014c9066985aa"} Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.295832 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" event={"ID":"dfa9f606-d53e-4df6-a5ef-ee3a579df07e","Type":"ContainerStarted","Data":"0b5e26c1b472120c8b6d210bb07fa7b0b67f4fa605726ee1850446e6594e7c46"} Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.296341 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.322331 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" podStartSLOduration=3.322304726 podStartE2EDuration="3.322304726s" podCreationTimestamp="2025-11-21 13:49:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:49:21.320085668 +0000 UTC m=+421.117917936" watchObservedRunningTime="2025-11-21 13:49:21.322304726 +0000 UTC m=+421.120136974" Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.487046 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" podUID="9b4d3b1f-5c64-4f18-91ca-d70893516609" containerName="oauth-openshift" containerID="cri-o://1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a" gracePeriod=15 Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.928962 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.963731 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-59dd575687-89pgm"] Nov 21 13:49:21 crc kubenswrapper[5133]: E1121 13:49:21.964065 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b4d3b1f-5c64-4f18-91ca-d70893516609" containerName="oauth-openshift" Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.964082 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b4d3b1f-5c64-4f18-91ca-d70893516609" containerName="oauth-openshift" Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.964231 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b4d3b1f-5c64-4f18-91ca-d70893516609" containerName="oauth-openshift" Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.964739 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:21 crc kubenswrapper[5133]: I1121 13:49:21.985786 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-59dd575687-89pgm"] Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114227 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-login\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114300 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-dir\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114356 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m74kk\" (UniqueName: \"kubernetes.io/projected/9b4d3b1f-5c64-4f18-91ca-d70893516609-kube-api-access-m74kk\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114382 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-service-ca\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114437 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-trusted-ca-bundle\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114466 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-error\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc 
kubenswrapper[5133]: I1121 13:49:22.114491 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-provider-selection\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114518 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-ocp-branding-template\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114542 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-cliconfig\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114577 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-serving-cert\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114549 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114605 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-policies\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114934 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-router-certs\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.114982 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-session\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.115094 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-idp-0-file-data\") pod \"9b4d3b1f-5c64-4f18-91ca-d70893516609\" (UID: \"9b4d3b1f-5c64-4f18-91ca-d70893516609\") " Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.115472 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.115549 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwhk5\" (UniqueName: \"kubernetes.io/projected/fbf5134f-f1a2-4355-9b29-4161baafef6b-kube-api-access-mwhk5\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.115621 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-template-error\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.115702 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.115728 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-router-certs\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.115778 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.115864 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.116452 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.116443 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.116736 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117022 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fbf5134f-f1a2-4355-9b29-4161baafef6b-audit-dir\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117072 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-audit-policies\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117130 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117175 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-template-login\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117226 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117271 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-session\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117298 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117323 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-service-ca\") pod 
\"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117401 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117420 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117437 5133 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117459 5133 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9b4d3b1f-5c64-4f18-91ca-d70893516609-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.117477 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.123450 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.123505 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b4d3b1f-5c64-4f18-91ca-d70893516609-kube-api-access-m74kk" (OuterVolumeSpecName: "kube-api-access-m74kk") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "kube-api-access-m74kk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.124696 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.130320 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.131497 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.132974 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.133877 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.137502 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.137966 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "9b4d3b1f-5c64-4f18-91ca-d70893516609" (UID: "9b4d3b1f-5c64-4f18-91ca-d70893516609"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219127 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219211 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fbf5134f-f1a2-4355-9b29-4161baafef6b-audit-dir\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219245 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-audit-policies\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219283 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219311 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-template-login\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219338 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219352 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/fbf5134f-f1a2-4355-9b29-4161baafef6b-audit-dir\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219382 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-session\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 
13:49:22.219458 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219486 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-service-ca\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219563 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219605 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwhk5\" (UniqueName: \"kubernetes.io/projected/fbf5134f-f1a2-4355-9b29-4161baafef6b-kube-api-access-mwhk5\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219643 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-template-error\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219728 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-router-certs\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219757 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219866 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219884 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219899 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219915 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219928 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219939 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m74kk\" (UniqueName: \"kubernetes.io/projected/9b4d3b1f-5c64-4f18-91ca-d70893516609-kube-api-access-m74kk\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219954 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219969 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.219983 5133 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/9b4d3b1f-5c64-4f18-91ca-d70893516609-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.220489 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.220579 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-audit-policies\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.220651 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc 
kubenswrapper[5133]: I1121 13:49:22.221126 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-service-ca\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.225032 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.225070 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-session\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.225952 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.226290 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.226330 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-template-error\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.226913 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-router-certs\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.227430 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-user-template-login\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.228283 5133 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/fbf5134f-f1a2-4355-9b29-4161baafef6b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.238931 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwhk5\" (UniqueName: \"kubernetes.io/projected/fbf5134f-f1a2-4355-9b29-4161baafef6b-kube-api-access-mwhk5\") pod \"oauth-openshift-59dd575687-89pgm\" (UID: \"fbf5134f-f1a2-4355-9b29-4161baafef6b\") " pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.291532 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.305338 5133 generic.go:334] "Generic (PLEG): container finished" podID="9b4d3b1f-5c64-4f18-91ca-d70893516609" containerID="1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a" exitCode=0 Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.306444 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.307144 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" event={"ID":"9b4d3b1f-5c64-4f18-91ca-d70893516609","Type":"ContainerDied","Data":"1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a"} Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.307203 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-ggrbw" event={"ID":"9b4d3b1f-5c64-4f18-91ca-d70893516609","Type":"ContainerDied","Data":"bfac6c65d9fb4f6957b56b65ad1b59c97019aaa419e99d041d9455cd168a0a67"} Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.307225 5133 scope.go:117] "RemoveContainer" containerID="1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.348190 5133 scope.go:117] "RemoveContainer" containerID="1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a" Nov 21 13:49:22 crc kubenswrapper[5133]: E1121 13:49:22.349054 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a\": container with ID starting with 1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a not found: ID does not exist" containerID="1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.349127 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a"} err="failed to get container status \"1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a\": rpc error: code = NotFound desc = could not find container \"1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a\": container with ID starting with 1b16ef56ff44170737e4ac90548fe825b34d555b37d47fd869e68e9bda87dc4a not found: ID does not exist" Nov 21 
13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.379498 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ggrbw"] Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.382565 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ggrbw"] Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.480694 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b4d3b1f-5c64-4f18-91ca-d70893516609" path="/var/lib/kubelet/pods/9b4d3b1f-5c64-4f18-91ca-d70893516609/volumes" Nov 21 13:49:22 crc kubenswrapper[5133]: I1121 13:49:22.748343 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-59dd575687-89pgm"] Nov 21 13:49:23 crc kubenswrapper[5133]: I1121 13:49:23.310801 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:49:23 crc kubenswrapper[5133]: I1121 13:49:23.310888 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:49:23 crc kubenswrapper[5133]: I1121 13:49:23.312140 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" event={"ID":"fbf5134f-f1a2-4355-9b29-4161baafef6b","Type":"ContainerStarted","Data":"b5cdd9c3c7a72a0a207629c412d4a0ff198fe522d93abae1dc0b60780dcd7f2f"} Nov 21 13:49:24 crc kubenswrapper[5133]: I1121 13:49:24.321329 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" event={"ID":"fbf5134f-f1a2-4355-9b29-4161baafef6b","Type":"ContainerStarted","Data":"98ce340116916fce2690a890b1a0a0b8960f833af29111a5b1e74356a66fa16c"} Nov 21 13:49:24 crc kubenswrapper[5133]: I1121 13:49:24.321879 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:24 crc kubenswrapper[5133]: I1121 13:49:24.335680 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" Nov 21 13:49:24 crc kubenswrapper[5133]: I1121 13:49:24.350180 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-59dd575687-89pgm" podStartSLOduration=28.350165872 podStartE2EDuration="28.350165872s" podCreationTimestamp="2025-11-21 13:48:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:49:24.346737581 +0000 UTC m=+424.144569829" watchObservedRunningTime="2025-11-21 13:49:24.350165872 +0000 UTC m=+424.147998110" Nov 21 13:49:39 crc kubenswrapper[5133]: I1121 13:49:39.005058 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-kwlzv" Nov 21 13:49:39 crc kubenswrapper[5133]: I1121 13:49:39.075084 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-image-registry/image-registry-697d97f7c8-5rjt4"] Nov 21 13:49:53 crc kubenswrapper[5133]: I1121 13:49:53.310654 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:49:53 crc kubenswrapper[5133]: I1121 13:49:53.311531 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:49:53 crc kubenswrapper[5133]: I1121 13:49:53.311590 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:49:53 crc kubenswrapper[5133]: I1121 13:49:53.312426 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dd93f35af870e12207633edd0793e79b2dcd5d4e2167c242b49f29a86ebd07a8"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 13:49:53 crc kubenswrapper[5133]: I1121 13:49:53.312516 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://dd93f35af870e12207633edd0793e79b2dcd5d4e2167c242b49f29a86ebd07a8" gracePeriod=600 Nov 21 13:49:53 crc kubenswrapper[5133]: I1121 13:49:53.535161 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="dd93f35af870e12207633edd0793e79b2dcd5d4e2167c242b49f29a86ebd07a8" exitCode=0 Nov 21 13:49:53 crc kubenswrapper[5133]: I1121 13:49:53.535254 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"dd93f35af870e12207633edd0793e79b2dcd5d4e2167c242b49f29a86ebd07a8"} Nov 21 13:49:53 crc kubenswrapper[5133]: I1121 13:49:53.535860 5133 scope.go:117] "RemoveContainer" containerID="9e5c730e837240b2ed45dff8a5411b8b49d21e7fbfb2dfcc6aef568b73b57745" Nov 21 13:49:54 crc kubenswrapper[5133]: I1121 13:49:54.544452 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"603056172cb84b83a56a09f7ae6762f8e617df9b4803981cd538b0aaab00d221"} Nov 21 13:50:04 crc kubenswrapper[5133]: I1121 13:50:04.122438 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" podUID="fc3cef5b-e125-43c6-be9e-52ae6617e01a" containerName="registry" containerID="cri-o://41a42c248eacc6cd87aa16ede50efbc988270bf60b3c42307b41f37dd4248165" gracePeriod=30 Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.059054 5133 generic.go:334] "Generic (PLEG): container finished" podID="fc3cef5b-e125-43c6-be9e-52ae6617e01a" 
containerID="41a42c248eacc6cd87aa16ede50efbc988270bf60b3c42307b41f37dd4248165" exitCode=0 Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.059114 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" event={"ID":"fc3cef5b-e125-43c6-be9e-52ae6617e01a","Type":"ContainerDied","Data":"41a42c248eacc6cd87aa16ede50efbc988270bf60b3c42307b41f37dd4248165"} Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.277554 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.406609 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-certificates\") pod \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.406696 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc3cef5b-e125-43c6-be9e-52ae6617e01a-ca-trust-extracted\") pod \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.406726 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-trusted-ca\") pod \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.406771 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gq6sb\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-kube-api-access-gq6sb\") pod \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.406792 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc3cef5b-e125-43c6-be9e-52ae6617e01a-installation-pull-secrets\") pod \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.406900 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.406925 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-bound-sa-token\") pod \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.406975 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-tls\") pod \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\" (UID: \"fc3cef5b-e125-43c6-be9e-52ae6617e01a\") " Nov 21 13:50:05 crc 
kubenswrapper[5133]: I1121 13:50:05.408036 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "fc3cef5b-e125-43c6-be9e-52ae6617e01a" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.408095 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "fc3cef5b-e125-43c6-be9e-52ae6617e01a" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.413334 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "fc3cef5b-e125-43c6-be9e-52ae6617e01a" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.413829 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "fc3cef5b-e125-43c6-be9e-52ae6617e01a" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.415010 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-kube-api-access-gq6sb" (OuterVolumeSpecName: "kube-api-access-gq6sb") pod "fc3cef5b-e125-43c6-be9e-52ae6617e01a" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a"). InnerVolumeSpecName "kube-api-access-gq6sb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.415711 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc3cef5b-e125-43c6-be9e-52ae6617e01a-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "fc3cef5b-e125-43c6-be9e-52ae6617e01a" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.429341 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc3cef5b-e125-43c6-be9e-52ae6617e01a-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "fc3cef5b-e125-43c6-be9e-52ae6617e01a" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.437911 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "fc3cef5b-e125-43c6-be9e-52ae6617e01a" (UID: "fc3cef5b-e125-43c6-be9e-52ae6617e01a"). 
InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.508125 5133 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.508164 5133 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.508176 5133 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/fc3cef5b-e125-43c6-be9e-52ae6617e01a-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.508207 5133 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fc3cef5b-e125-43c6-be9e-52ae6617e01a-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.508217 5133 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/fc3cef5b-e125-43c6-be9e-52ae6617e01a-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.508226 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gq6sb\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-kube-api-access-gq6sb\") on node \"crc\" DevicePath \"\"" Nov 21 13:50:05 crc kubenswrapper[5133]: I1121 13:50:05.508234 5133 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc3cef5b-e125-43c6-be9e-52ae6617e01a-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 21 13:50:06 crc kubenswrapper[5133]: I1121 13:50:06.069371 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" event={"ID":"fc3cef5b-e125-43c6-be9e-52ae6617e01a","Type":"ContainerDied","Data":"e1d194fe5caa27b7fe5b709029eac21b433162fbd46fe8ad73f72ddcd60d0dd1"} Nov 21 13:50:06 crc kubenswrapper[5133]: I1121 13:50:06.069433 5133 scope.go:117] "RemoveContainer" containerID="41a42c248eacc6cd87aa16ede50efbc988270bf60b3c42307b41f37dd4248165" Nov 21 13:50:06 crc kubenswrapper[5133]: I1121 13:50:06.069506 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-5rjt4" Nov 21 13:50:06 crc kubenswrapper[5133]: I1121 13:50:06.117801 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5rjt4"] Nov 21 13:50:06 crc kubenswrapper[5133]: I1121 13:50:06.128741 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-5rjt4"] Nov 21 13:50:06 crc kubenswrapper[5133]: I1121 13:50:06.472196 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc3cef5b-e125-43c6-be9e-52ae6617e01a" path="/var/lib/kubelet/pods/fc3cef5b-e125-43c6-be9e-52ae6617e01a/volumes" Nov 21 13:52:23 crc kubenswrapper[5133]: I1121 13:52:23.310870 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:52:23 crc kubenswrapper[5133]: I1121 13:52:23.311781 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:52:53 crc kubenswrapper[5133]: I1121 13:52:53.311398 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:52:53 crc kubenswrapper[5133]: I1121 13:52:53.312316 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:53:23 crc kubenswrapper[5133]: I1121 13:53:23.311026 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:53:23 crc kubenswrapper[5133]: I1121 13:53:23.311965 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:53:23 crc kubenswrapper[5133]: I1121 13:53:23.312068 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:53:23 crc kubenswrapper[5133]: I1121 13:53:23.312955 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"603056172cb84b83a56a09f7ae6762f8e617df9b4803981cd538b0aaab00d221"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed 
liveness probe, will be restarted" Nov 21 13:53:23 crc kubenswrapper[5133]: I1121 13:53:23.313165 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://603056172cb84b83a56a09f7ae6762f8e617df9b4803981cd538b0aaab00d221" gracePeriod=600 Nov 21 13:53:24 crc kubenswrapper[5133]: I1121 13:53:24.351694 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="603056172cb84b83a56a09f7ae6762f8e617df9b4803981cd538b0aaab00d221" exitCode=0 Nov 21 13:53:24 crc kubenswrapper[5133]: I1121 13:53:24.351793 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"603056172cb84b83a56a09f7ae6762f8e617df9b4803981cd538b0aaab00d221"} Nov 21 13:53:24 crc kubenswrapper[5133]: I1121 13:53:24.352312 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"8270d91a0dc8e867dadddbaf1606a312bc821670bb959c0005ca6906c1d15307"} Nov 21 13:53:24 crc kubenswrapper[5133]: I1121 13:53:24.352351 5133 scope.go:117] "RemoveContainer" containerID="dd93f35af870e12207633edd0793e79b2dcd5d4e2167c242b49f29a86ebd07a8" Nov 21 13:55:01 crc kubenswrapper[5133]: I1121 13:55:01.678203 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5pm4x"] Nov 21 13:55:01 crc kubenswrapper[5133]: I1121 13:55:01.679359 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" podUID="e2755732-9869-46c9-be0e-e1fc77aa6644" containerName="controller-manager" containerID="cri-o://da0645a2ed5de0ca552a83c927ae4d00e796fcfecb2572ceb222a87c04bb2539" gracePeriod=30 Nov 21 13:55:01 crc kubenswrapper[5133]: I1121 13:55:01.762492 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g"] Nov 21 13:55:01 crc kubenswrapper[5133]: I1121 13:55:01.762719 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" podUID="241eb3eb-c31a-4fe5-8547-34a326d75803" containerName="route-controller-manager" containerID="cri-o://8ac09f959915ca968af51204932c60a203e4932dc5d459af8cdcd176c1462b7a" gracePeriod=30 Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.009398 5133 generic.go:334] "Generic (PLEG): container finished" podID="e2755732-9869-46c9-be0e-e1fc77aa6644" containerID="da0645a2ed5de0ca552a83c927ae4d00e796fcfecb2572ceb222a87c04bb2539" exitCode=0 Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.009480 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" event={"ID":"e2755732-9869-46c9-be0e-e1fc77aa6644","Type":"ContainerDied","Data":"da0645a2ed5de0ca552a83c927ae4d00e796fcfecb2572ceb222a87c04bb2539"} Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.012217 5133 generic.go:334] "Generic (PLEG): container finished" podID="241eb3eb-c31a-4fe5-8547-34a326d75803" 
containerID="8ac09f959915ca968af51204932c60a203e4932dc5d459af8cdcd176c1462b7a" exitCode=0 Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.012252 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" event={"ID":"241eb3eb-c31a-4fe5-8547-34a326d75803","Type":"ContainerDied","Data":"8ac09f959915ca968af51204932c60a203e4932dc5d459af8cdcd176c1462b7a"} Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.651344 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.709369 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.819799 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/241eb3eb-c31a-4fe5-8547-34a326d75803-serving-cert\") pod \"241eb3eb-c31a-4fe5-8547-34a326d75803\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.819861 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47ptm\" (UniqueName: \"kubernetes.io/projected/e2755732-9869-46c9-be0e-e1fc77aa6644-kube-api-access-47ptm\") pod \"e2755732-9869-46c9-be0e-e1fc77aa6644\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.819916 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-proxy-ca-bundles\") pod \"e2755732-9869-46c9-be0e-e1fc77aa6644\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.819941 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2755732-9869-46c9-be0e-e1fc77aa6644-serving-cert\") pod \"e2755732-9869-46c9-be0e-e1fc77aa6644\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.819975 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-config\") pod \"e2755732-9869-46c9-be0e-e1fc77aa6644\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.820080 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-client-ca\") pod \"241eb3eb-c31a-4fe5-8547-34a326d75803\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.820115 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-client-ca\") pod \"e2755732-9869-46c9-be0e-e1fc77aa6644\" (UID: \"e2755732-9869-46c9-be0e-e1fc77aa6644\") " Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.820147 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7jl9\" (UniqueName: 
\"kubernetes.io/projected/241eb3eb-c31a-4fe5-8547-34a326d75803-kube-api-access-p7jl9\") pod \"241eb3eb-c31a-4fe5-8547-34a326d75803\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.820175 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-config\") pod \"241eb3eb-c31a-4fe5-8547-34a326d75803\" (UID: \"241eb3eb-c31a-4fe5-8547-34a326d75803\") " Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.821218 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-client-ca" (OuterVolumeSpecName: "client-ca") pod "e2755732-9869-46c9-be0e-e1fc77aa6644" (UID: "e2755732-9869-46c9-be0e-e1fc77aa6644"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.821210 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "e2755732-9869-46c9-be0e-e1fc77aa6644" (UID: "e2755732-9869-46c9-be0e-e1fc77aa6644"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.821437 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-config" (OuterVolumeSpecName: "config") pod "e2755732-9869-46c9-be0e-e1fc77aa6644" (UID: "e2755732-9869-46c9-be0e-e1fc77aa6644"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.821627 5133 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.821650 5133 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.821780 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-config" (OuterVolumeSpecName: "config") pod "241eb3eb-c31a-4fe5-8547-34a326d75803" (UID: "241eb3eb-c31a-4fe5-8547-34a326d75803"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.822081 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-client-ca" (OuterVolumeSpecName: "client-ca") pod "241eb3eb-c31a-4fe5-8547-34a326d75803" (UID: "241eb3eb-c31a-4fe5-8547-34a326d75803"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.827639 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2755732-9869-46c9-be0e-e1fc77aa6644-kube-api-access-47ptm" (OuterVolumeSpecName: "kube-api-access-47ptm") pod "e2755732-9869-46c9-be0e-e1fc77aa6644" (UID: "e2755732-9869-46c9-be0e-e1fc77aa6644"). 
InnerVolumeSpecName "kube-api-access-47ptm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.827718 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2755732-9869-46c9-be0e-e1fc77aa6644-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e2755732-9869-46c9-be0e-e1fc77aa6644" (UID: "e2755732-9869-46c9-be0e-e1fc77aa6644"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.828191 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/241eb3eb-c31a-4fe5-8547-34a326d75803-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "241eb3eb-c31a-4fe5-8547-34a326d75803" (UID: "241eb3eb-c31a-4fe5-8547-34a326d75803"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.830155 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/241eb3eb-c31a-4fe5-8547-34a326d75803-kube-api-access-p7jl9" (OuterVolumeSpecName: "kube-api-access-p7jl9") pod "241eb3eb-c31a-4fe5-8547-34a326d75803" (UID: "241eb3eb-c31a-4fe5-8547-34a326d75803"). InnerVolumeSpecName "kube-api-access-p7jl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.922836 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47ptm\" (UniqueName: \"kubernetes.io/projected/e2755732-9869-46c9-be0e-e1fc77aa6644-kube-api-access-47ptm\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.922888 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e2755732-9869-46c9-be0e-e1fc77aa6644-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.922906 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2755732-9869-46c9-be0e-e1fc77aa6644-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.922923 5133 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.922938 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7jl9\" (UniqueName: \"kubernetes.io/projected/241eb3eb-c31a-4fe5-8547-34a326d75803-kube-api-access-p7jl9\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.922953 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/241eb3eb-c31a-4fe5-8547-34a326d75803-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:02 crc kubenswrapper[5133]: I1121 13:55:02.922968 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/241eb3eb-c31a-4fe5-8547-34a326d75803-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.021387 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.021385 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5pm4x" event={"ID":"e2755732-9869-46c9-be0e-e1fc77aa6644","Type":"ContainerDied","Data":"14e7b6bdc3eabc2c5b05eaca846925bc87e90895b58574bf56e750b5d80a81c2"} Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.022072 5133 scope.go:117] "RemoveContainer" containerID="da0645a2ed5de0ca552a83c927ae4d00e796fcfecb2572ceb222a87c04bb2539" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.023892 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" event={"ID":"241eb3eb-c31a-4fe5-8547-34a326d75803","Type":"ContainerDied","Data":"f6e1d489911c2aa1988817c3927a2c6cc36caef5b152a31ebb03038989b30777"} Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.023972 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.054388 5133 scope.go:117] "RemoveContainer" containerID="8ac09f959915ca968af51204932c60a203e4932dc5d459af8cdcd176c1462b7a" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.073923 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g"] Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.087083 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zg29g"] Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.094621 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5pm4x"] Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.100842 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5pm4x"] Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.259627 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9"] Nov 21 13:55:03 crc kubenswrapper[5133]: E1121 13:55:03.259981 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="241eb3eb-c31a-4fe5-8547-34a326d75803" containerName="route-controller-manager" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.260026 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="241eb3eb-c31a-4fe5-8547-34a326d75803" containerName="route-controller-manager" Nov 21 13:55:03 crc kubenswrapper[5133]: E1121 13:55:03.260051 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2755732-9869-46c9-be0e-e1fc77aa6644" containerName="controller-manager" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.260063 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2755732-9869-46c9-be0e-e1fc77aa6644" containerName="controller-manager" Nov 21 13:55:03 crc kubenswrapper[5133]: E1121 13:55:03.260076 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc3cef5b-e125-43c6-be9e-52ae6617e01a" containerName="registry" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.260085 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc3cef5b-e125-43c6-be9e-52ae6617e01a" containerName="registry" Nov 
21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.260211 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc3cef5b-e125-43c6-be9e-52ae6617e01a" containerName="registry" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.260229 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2755732-9869-46c9-be0e-e1fc77aa6644" containerName="controller-manager" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.260245 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="241eb3eb-c31a-4fe5-8547-34a326d75803" containerName="route-controller-manager" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.260766 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.265366 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.266804 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.267704 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.268180 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.268773 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.269426 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.269728 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5957fbdcff-h2bxq"] Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.271292 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.273916 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9"] Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.276548 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.276677 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.276785 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.276855 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.276952 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.277033 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.280463 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5957fbdcff-h2bxq"] Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.289534 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.429849 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shk5p\" (UniqueName: \"kubernetes.io/projected/27f2b908-a72e-4201-9065-9658c6c82b21-kube-api-access-shk5p\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.429932 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27f2b908-a72e-4201-9065-9658c6c82b21-config\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.430046 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/27f2b908-a72e-4201-9065-9658c6c82b21-client-ca\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.430072 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27f2b908-a72e-4201-9065-9658c6c82b21-serving-cert\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc 
kubenswrapper[5133]: I1121 13:55:03.430119 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-config\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.430210 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-client-ca\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.430244 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crhc7\" (UniqueName: \"kubernetes.io/projected/b4680a6c-f3ab-490a-8032-1f86a17bc118-kube-api-access-crhc7\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.430310 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4680a6c-f3ab-490a-8032-1f86a17bc118-serving-cert\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.430361 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/27f2b908-a72e-4201-9065-9658c6c82b21-proxy-ca-bundles\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.531767 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-config\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.531896 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-client-ca\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.531952 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crhc7\" (UniqueName: \"kubernetes.io/projected/b4680a6c-f3ab-490a-8032-1f86a17bc118-kube-api-access-crhc7\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 
crc kubenswrapper[5133]: I1121 13:55:03.532041 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4680a6c-f3ab-490a-8032-1f86a17bc118-serving-cert\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.532116 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/27f2b908-a72e-4201-9065-9658c6c82b21-proxy-ca-bundles\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.532183 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shk5p\" (UniqueName: \"kubernetes.io/projected/27f2b908-a72e-4201-9065-9658c6c82b21-kube-api-access-shk5p\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.532253 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27f2b908-a72e-4201-9065-9658c6c82b21-config\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.532333 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/27f2b908-a72e-4201-9065-9658c6c82b21-client-ca\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.532364 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27f2b908-a72e-4201-9065-9658c6c82b21-serving-cert\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.533092 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-client-ca\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.533772 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/27f2b908-a72e-4201-9065-9658c6c82b21-client-ca\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.534068 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-config\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.534720 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/27f2b908-a72e-4201-9065-9658c6c82b21-proxy-ca-bundles\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.534889 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/27f2b908-a72e-4201-9065-9658c6c82b21-config\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.540058 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4680a6c-f3ab-490a-8032-1f86a17bc118-serving-cert\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.540062 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/27f2b908-a72e-4201-9065-9658c6c82b21-serving-cert\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.550566 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crhc7\" (UniqueName: \"kubernetes.io/projected/b4680a6c-f3ab-490a-8032-1f86a17bc118-kube-api-access-crhc7\") pod \"route-controller-manager-f7bf9444c-jvhb9\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.561736 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shk5p\" (UniqueName: \"kubernetes.io/projected/27f2b908-a72e-4201-9065-9658c6c82b21-kube-api-access-shk5p\") pod \"controller-manager-5957fbdcff-h2bxq\" (UID: \"27f2b908-a72e-4201-9065-9658c6c82b21\") " pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.595428 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.607656 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.649868 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9"] Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.845789 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9"] Nov 21 13:55:03 crc kubenswrapper[5133]: I1121 13:55:03.887491 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5957fbdcff-h2bxq"] Nov 21 13:55:03 crc kubenswrapper[5133]: W1121 13:55:03.899325 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27f2b908_a72e_4201_9065_9658c6c82b21.slice/crio-0bd2252e7335d427366b0aba25a73e2adc2edc06558e1d134e6be03c7cf5d984 WatchSource:0}: Error finding container 0bd2252e7335d427366b0aba25a73e2adc2edc06558e1d134e6be03c7cf5d984: Status 404 returned error can't find the container with id 0bd2252e7335d427366b0aba25a73e2adc2edc06558e1d134e6be03c7cf5d984 Nov 21 13:55:04 crc kubenswrapper[5133]: I1121 13:55:04.030247 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" event={"ID":"b4680a6c-f3ab-490a-8032-1f86a17bc118","Type":"ContainerStarted","Data":"7d952389d3d3633c272efdefe72953ebb2899c14eefddc45f38d78da6687268a"} Nov 21 13:55:04 crc kubenswrapper[5133]: I1121 13:55:04.032706 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" event={"ID":"27f2b908-a72e-4201-9065-9658c6c82b21","Type":"ContainerStarted","Data":"0bd2252e7335d427366b0aba25a73e2adc2edc06558e1d134e6be03c7cf5d984"} Nov 21 13:55:04 crc kubenswrapper[5133]: I1121 13:55:04.468900 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="241eb3eb-c31a-4fe5-8547-34a326d75803" path="/var/lib/kubelet/pods/241eb3eb-c31a-4fe5-8547-34a326d75803/volumes" Nov 21 13:55:04 crc kubenswrapper[5133]: I1121 13:55:04.470217 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2755732-9869-46c9-be0e-e1fc77aa6644" path="/var/lib/kubelet/pods/e2755732-9869-46c9-be0e-e1fc77aa6644/volumes" Nov 21 13:55:05 crc kubenswrapper[5133]: I1121 13:55:05.040840 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" event={"ID":"b4680a6c-f3ab-490a-8032-1f86a17bc118","Type":"ContainerStarted","Data":"52c987a85e2cdc39fcb5c2a54dff0cb3f46026751eb4b48ac029a4d7e42ca039"} Nov 21 13:55:05 crc kubenswrapper[5133]: I1121 13:55:05.041050 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" podUID="b4680a6c-f3ab-490a-8032-1f86a17bc118" containerName="route-controller-manager" containerID="cri-o://52c987a85e2cdc39fcb5c2a54dff0cb3f46026751eb4b48ac029a4d7e42ca039" gracePeriod=30 Nov 21 13:55:05 crc kubenswrapper[5133]: I1121 13:55:05.041301 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:05 crc kubenswrapper[5133]: I1121 13:55:05.043471 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" event={"ID":"27f2b908-a72e-4201-9065-9658c6c82b21","Type":"ContainerStarted","Data":"4c7cb6ad487932666764c730361f7c614ce979988f9d00ca619b051661826045"} Nov 21 13:55:05 crc kubenswrapper[5133]: I1121 13:55:05.043742 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:05 crc kubenswrapper[5133]: I1121 13:55:05.048591 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" Nov 21 13:55:05 crc kubenswrapper[5133]: I1121 13:55:05.049387 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:05 crc kubenswrapper[5133]: I1121 13:55:05.074102 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" podStartSLOduration=4.074074165 podStartE2EDuration="4.074074165s" podCreationTimestamp="2025-11-21 13:55:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:55:05.068607829 +0000 UTC m=+764.866440077" watchObservedRunningTime="2025-11-21 13:55:05.074074165 +0000 UTC m=+764.871906423" Nov 21 13:55:05 crc kubenswrapper[5133]: I1121 13:55:05.117085 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5957fbdcff-h2bxq" podStartSLOduration=4.117062058 podStartE2EDuration="4.117062058s" podCreationTimestamp="2025-11-21 13:55:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:55:05.113766379 +0000 UTC m=+764.911598647" watchObservedRunningTime="2025-11-21 13:55:05.117062058 +0000 UTC m=+764.914894316" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.054454 5133 generic.go:334] "Generic (PLEG): container finished" podID="b4680a6c-f3ab-490a-8032-1f86a17bc118" containerID="52c987a85e2cdc39fcb5c2a54dff0cb3f46026751eb4b48ac029a4d7e42ca039" exitCode=0 Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.054735 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" event={"ID":"b4680a6c-f3ab-490a-8032-1f86a17bc118","Type":"ContainerDied","Data":"52c987a85e2cdc39fcb5c2a54dff0cb3f46026751eb4b48ac029a4d7e42ca039"} Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.126809 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.175119 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k"] Nov 21 13:55:06 crc kubenswrapper[5133]: E1121 13:55:06.175435 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4680a6c-f3ab-490a-8032-1f86a17bc118" containerName="route-controller-manager" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.175451 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4680a6c-f3ab-490a-8032-1f86a17bc118" containerName="route-controller-manager" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.175567 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4680a6c-f3ab-490a-8032-1f86a17bc118" containerName="route-controller-manager" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.182551 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-client-ca\") pod \"b4680a6c-f3ab-490a-8032-1f86a17bc118\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.182615 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-config\") pod \"b4680a6c-f3ab-490a-8032-1f86a17bc118\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.182662 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crhc7\" (UniqueName: \"kubernetes.io/projected/b4680a6c-f3ab-490a-8032-1f86a17bc118-kube-api-access-crhc7\") pod \"b4680a6c-f3ab-490a-8032-1f86a17bc118\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.182715 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4680a6c-f3ab-490a-8032-1f86a17bc118-serving-cert\") pod \"b4680a6c-f3ab-490a-8032-1f86a17bc118\" (UID: \"b4680a6c-f3ab-490a-8032-1f86a17bc118\") " Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.183721 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-client-ca" (OuterVolumeSpecName: "client-ca") pod "b4680a6c-f3ab-490a-8032-1f86a17bc118" (UID: "b4680a6c-f3ab-490a-8032-1f86a17bc118"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.183736 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-config" (OuterVolumeSpecName: "config") pod "b4680a6c-f3ab-490a-8032-1f86a17bc118" (UID: "b4680a6c-f3ab-490a-8032-1f86a17bc118"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.188189 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.191503 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4680a6c-f3ab-490a-8032-1f86a17bc118-kube-api-access-crhc7" (OuterVolumeSpecName: "kube-api-access-crhc7") pod "b4680a6c-f3ab-490a-8032-1f86a17bc118" (UID: "b4680a6c-f3ab-490a-8032-1f86a17bc118"). InnerVolumeSpecName "kube-api-access-crhc7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.193051 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4680a6c-f3ab-490a-8032-1f86a17bc118-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b4680a6c-f3ab-490a-8032-1f86a17bc118" (UID: "b4680a6c-f3ab-490a-8032-1f86a17bc118"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.199569 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k"] Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.284213 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-config\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.284300 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-serving-cert\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.284334 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdl4f\" (UniqueName: \"kubernetes.io/projected/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-kube-api-access-zdl4f\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.284360 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-client-ca\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.284411 5133 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-client-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.284423 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4680a6c-f3ab-490a-8032-1f86a17bc118-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:06 crc 
kubenswrapper[5133]: I1121 13:55:06.284432 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crhc7\" (UniqueName: \"kubernetes.io/projected/b4680a6c-f3ab-490a-8032-1f86a17bc118-kube-api-access-crhc7\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.284444 5133 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b4680a6c-f3ab-490a-8032-1f86a17bc118-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.385287 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-serving-cert\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.385334 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdl4f\" (UniqueName: \"kubernetes.io/projected/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-kube-api-access-zdl4f\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.385357 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-client-ca\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.385430 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-config\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.387429 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-config\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.389330 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-client-ca\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.400061 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-serving-cert\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc 
kubenswrapper[5133]: I1121 13:55:06.430769 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdl4f\" (UniqueName: \"kubernetes.io/projected/742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa-kube-api-access-zdl4f\") pod \"route-controller-manager-85449d9544-n7s2k\" (UID: \"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa\") " pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.548314 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:06 crc kubenswrapper[5133]: I1121 13:55:06.773731 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k"] Nov 21 13:55:07 crc kubenswrapper[5133]: I1121 13:55:07.062745 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" event={"ID":"b4680a6c-f3ab-490a-8032-1f86a17bc118","Type":"ContainerDied","Data":"7d952389d3d3633c272efdefe72953ebb2899c14eefddc45f38d78da6687268a"} Nov 21 13:55:07 crc kubenswrapper[5133]: I1121 13:55:07.063192 5133 scope.go:117] "RemoveContainer" containerID="52c987a85e2cdc39fcb5c2a54dff0cb3f46026751eb4b48ac029a4d7e42ca039" Nov 21 13:55:07 crc kubenswrapper[5133]: I1121 13:55:07.062840 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9" Nov 21 13:55:07 crc kubenswrapper[5133]: I1121 13:55:07.065188 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" event={"ID":"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa","Type":"ContainerStarted","Data":"f34df50eab20f04117f56ff6fb89e1dfca894537db39aa9fe66771519c65e437"} Nov 21 13:55:07 crc kubenswrapper[5133]: I1121 13:55:07.065218 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" event={"ID":"742f2084-bc58-4fd5-a4b5-9f8d6b9e99aa","Type":"ContainerStarted","Data":"a811d4e66aa53077ea64caadd13ff20a6b26575fdf8a1ea6fa81d6db12991c6c"} Nov 21 13:55:07 crc kubenswrapper[5133]: I1121 13:55:07.095794 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9"] Nov 21 13:55:07 crc kubenswrapper[5133]: I1121 13:55:07.101649 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-f7bf9444c-jvhb9"] Nov 21 13:55:07 crc kubenswrapper[5133]: I1121 13:55:07.118027 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" podStartSLOduration=4.117988285 podStartE2EDuration="4.117988285s" podCreationTimestamp="2025-11-21 13:55:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:55:07.11517271 +0000 UTC m=+766.913004958" watchObservedRunningTime="2025-11-21 13:55:07.117988285 +0000 UTC m=+766.915820523" Nov 21 13:55:08 crc kubenswrapper[5133]: I1121 13:55:08.073815 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:08 crc 
kubenswrapper[5133]: I1121 13:55:08.081079 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-85449d9544-n7s2k" Nov 21 13:55:08 crc kubenswrapper[5133]: I1121 13:55:08.479885 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4680a6c-f3ab-490a-8032-1f86a17bc118" path="/var/lib/kubelet/pods/b4680a6c-f3ab-490a-8032-1f86a17bc118/volumes" Nov 21 13:55:14 crc kubenswrapper[5133]: I1121 13:55:14.562954 5133 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.175127 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mvs62"] Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.176944 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvs62" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.186231 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.186294 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.188444 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-t5gk2"] Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.189510 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-t5gk2" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.196077 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mvs62"] Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.197620 5133 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-hg8qp" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.199915 5133 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-mb7cp" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.201382 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjj5w\" (UniqueName: \"kubernetes.io/projected/877c323d-4f37-4803-8376-43e5946404e2-kube-api-access-jjj5w\") pod \"cert-manager-5b446d88c5-t5gk2\" (UID: \"877c323d-4f37-4803-8376-43e5946404e2\") " pod="cert-manager/cert-manager-5b446d88c5-t5gk2" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.201495 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hkbp\" (UniqueName: \"kubernetes.io/projected/f6bf4994-1e4e-4b36-96f8-db4c09620dc6-kube-api-access-4hkbp\") pod \"cert-manager-cainjector-7f985d654d-mvs62\" (UID: \"f6bf4994-1e4e-4b36-96f8-db4c09620dc6\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mvs62" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.204570 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-t5gk2"] Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.208579 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-k9w95"] Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.209392 5133 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.211602 5133 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-28pnl" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.235432 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-k9w95"] Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.305214 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hkbp\" (UniqueName: \"kubernetes.io/projected/f6bf4994-1e4e-4b36-96f8-db4c09620dc6-kube-api-access-4hkbp\") pod \"cert-manager-cainjector-7f985d654d-mvs62\" (UID: \"f6bf4994-1e4e-4b36-96f8-db4c09620dc6\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mvs62" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.305293 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9tt6\" (UniqueName: \"kubernetes.io/projected/a3ddb6ce-5d67-47b1-8550-56613fa11579-kube-api-access-p9tt6\") pod \"cert-manager-webhook-5655c58dd6-k9w95\" (UID: \"a3ddb6ce-5d67-47b1-8550-56613fa11579\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.305385 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjj5w\" (UniqueName: \"kubernetes.io/projected/877c323d-4f37-4803-8376-43e5946404e2-kube-api-access-jjj5w\") pod \"cert-manager-5b446d88c5-t5gk2\" (UID: \"877c323d-4f37-4803-8376-43e5946404e2\") " pod="cert-manager/cert-manager-5b446d88c5-t5gk2" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.328595 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjj5w\" (UniqueName: \"kubernetes.io/projected/877c323d-4f37-4803-8376-43e5946404e2-kube-api-access-jjj5w\") pod \"cert-manager-5b446d88c5-t5gk2\" (UID: \"877c323d-4f37-4803-8376-43e5946404e2\") " pod="cert-manager/cert-manager-5b446d88c5-t5gk2" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.328658 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hkbp\" (UniqueName: \"kubernetes.io/projected/f6bf4994-1e4e-4b36-96f8-db4c09620dc6-kube-api-access-4hkbp\") pod \"cert-manager-cainjector-7f985d654d-mvs62\" (UID: \"f6bf4994-1e4e-4b36-96f8-db4c09620dc6\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mvs62" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.406845 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9tt6\" (UniqueName: \"kubernetes.io/projected/a3ddb6ce-5d67-47b1-8550-56613fa11579-kube-api-access-p9tt6\") pod \"cert-manager-webhook-5655c58dd6-k9w95\" (UID: \"a3ddb6ce-5d67-47b1-8550-56613fa11579\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.425568 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9tt6\" (UniqueName: \"kubernetes.io/projected/a3ddb6ce-5d67-47b1-8550-56613fa11579-kube-api-access-p9tt6\") pod \"cert-manager-webhook-5655c58dd6-k9w95\" (UID: \"a3ddb6ce-5d67-47b1-8550-56613fa11579\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.499562 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvs62" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.522767 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-t5gk2" Nov 21 13:55:43 crc kubenswrapper[5133]: I1121 13:55:43.533281 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" Nov 21 13:55:44 crc kubenswrapper[5133]: I1121 13:55:44.003677 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-t5gk2"] Nov 21 13:55:44 crc kubenswrapper[5133]: I1121 13:55:44.007570 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mvs62"] Nov 21 13:55:44 crc kubenswrapper[5133]: W1121 13:55:44.014887 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod877c323d_4f37_4803_8376_43e5946404e2.slice/crio-41d8e3da48ecec92af9b898683a77a6b0cac8eb731d34502d543994ca8bc1ea9 WatchSource:0}: Error finding container 41d8e3da48ecec92af9b898683a77a6b0cac8eb731d34502d543994ca8bc1ea9: Status 404 returned error can't find the container with id 41d8e3da48ecec92af9b898683a77a6b0cac8eb731d34502d543994ca8bc1ea9 Nov 21 13:55:44 crc kubenswrapper[5133]: W1121 13:55:44.017287 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6bf4994_1e4e_4b36_96f8_db4c09620dc6.slice/crio-c06abba86bf1908336f35d1dda2a7a5b7fb7af5c97472e4ca9e650ea36a027b4 WatchSource:0}: Error finding container c06abba86bf1908336f35d1dda2a7a5b7fb7af5c97472e4ca9e650ea36a027b4: Status 404 returned error can't find the container with id c06abba86bf1908336f35d1dda2a7a5b7fb7af5c97472e4ca9e650ea36a027b4 Nov 21 13:55:44 crc kubenswrapper[5133]: I1121 13:55:44.019517 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 13:55:44 crc kubenswrapper[5133]: I1121 13:55:44.087267 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-k9w95"] Nov 21 13:55:44 crc kubenswrapper[5133]: W1121 13:55:44.093615 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3ddb6ce_5d67_47b1_8550_56613fa11579.slice/crio-b3b4d92f1f01845d41376676a123a7ac379a3afa3fa26489e2e719329deede40 WatchSource:0}: Error finding container b3b4d92f1f01845d41376676a123a7ac379a3afa3fa26489e2e719329deede40: Status 404 returned error can't find the container with id b3b4d92f1f01845d41376676a123a7ac379a3afa3fa26489e2e719329deede40 Nov 21 13:55:44 crc kubenswrapper[5133]: I1121 13:55:44.315353 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" event={"ID":"a3ddb6ce-5d67-47b1-8550-56613fa11579","Type":"ContainerStarted","Data":"b3b4d92f1f01845d41376676a123a7ac379a3afa3fa26489e2e719329deede40"} Nov 21 13:55:44 crc kubenswrapper[5133]: I1121 13:55:44.316574 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-t5gk2" event={"ID":"877c323d-4f37-4803-8376-43e5946404e2","Type":"ContainerStarted","Data":"41d8e3da48ecec92af9b898683a77a6b0cac8eb731d34502d543994ca8bc1ea9"} Nov 21 13:55:44 crc kubenswrapper[5133]: I1121 13:55:44.318063 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="cert-manager/cert-manager-cainjector-7f985d654d-mvs62" event={"ID":"f6bf4994-1e4e-4b36-96f8-db4c09620dc6","Type":"ContainerStarted","Data":"c06abba86bf1908336f35d1dda2a7a5b7fb7af5c97472e4ca9e650ea36a027b4"} Nov 21 13:55:49 crc kubenswrapper[5133]: I1121 13:55:49.353942 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-t5gk2" event={"ID":"877c323d-4f37-4803-8376-43e5946404e2","Type":"ContainerStarted","Data":"3364ba1fbbe54887b4e73dff0edcec54479f5bccacfaa530f4f2a6a053c3352a"} Nov 21 13:55:49 crc kubenswrapper[5133]: I1121 13:55:49.356094 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvs62" event={"ID":"f6bf4994-1e4e-4b36-96f8-db4c09620dc6","Type":"ContainerStarted","Data":"07ff1f84135f9906929737bec4af0697d1169f5e8f068c89020851d4ed278067"} Nov 21 13:55:49 crc kubenswrapper[5133]: I1121 13:55:49.376743 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-t5gk2" podStartSLOduration=1.6802161820000001 podStartE2EDuration="6.376717622s" podCreationTimestamp="2025-11-21 13:55:43 +0000 UTC" firstStartedPulling="2025-11-21 13:55:44.018690587 +0000 UTC m=+803.816522835" lastFinishedPulling="2025-11-21 13:55:48.715192027 +0000 UTC m=+808.513024275" observedRunningTime="2025-11-21 13:55:49.368472741 +0000 UTC m=+809.166304979" watchObservedRunningTime="2025-11-21 13:55:49.376717622 +0000 UTC m=+809.174549860" Nov 21 13:55:49 crc kubenswrapper[5133]: I1121 13:55:49.394734 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-mvs62" podStartSLOduration=1.711404599 podStartE2EDuration="6.394709075s" podCreationTimestamp="2025-11-21 13:55:43 +0000 UTC" firstStartedPulling="2025-11-21 13:55:44.021302937 +0000 UTC m=+803.819135185" lastFinishedPulling="2025-11-21 13:55:48.704607403 +0000 UTC m=+808.502439661" observedRunningTime="2025-11-21 13:55:49.391630532 +0000 UTC m=+809.189462810" watchObservedRunningTime="2025-11-21 13:55:49.394709075 +0000 UTC m=+809.192541323" Nov 21 13:55:50 crc kubenswrapper[5133]: I1121 13:55:50.363558 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" event={"ID":"a3ddb6ce-5d67-47b1-8550-56613fa11579","Type":"ContainerStarted","Data":"456fd8015ded1a7ce2b8c6e41226f4080c4baf34d9ac11ba08a1bcc40a5a7c48"} Nov 21 13:55:50 crc kubenswrapper[5133]: I1121 13:55:50.364146 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" Nov 21 13:55:50 crc kubenswrapper[5133]: I1121 13:55:50.383403 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" podStartSLOduration=1.62751008 podStartE2EDuration="7.383383803s" podCreationTimestamp="2025-11-21 13:55:43 +0000 UTC" firstStartedPulling="2025-11-21 13:55:44.096512063 +0000 UTC m=+803.894344311" lastFinishedPulling="2025-11-21 13:55:49.852385786 +0000 UTC m=+809.650218034" observedRunningTime="2025-11-21 13:55:50.381991665 +0000 UTC m=+810.179823933" watchObservedRunningTime="2025-11-21 13:55:50.383383803 +0000 UTC m=+810.181216051" Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.311112 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.311568 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.411226 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tjzm8"] Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.411764 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="nbdb" containerID="cri-o://c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" gracePeriod=30 Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.411910 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145" gracePeriod=30 Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.412187 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="northd" containerID="cri-o://94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6" gracePeriod=30 Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.411941 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="sbdb" containerID="cri-o://6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" gracePeriod=30 Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.412286 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovn-acl-logging" containerID="cri-o://563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f" gracePeriod=30 Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.412357 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kube-rbac-proxy-node" containerID="cri-o://ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1" gracePeriod=30 Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.412503 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovn-controller" containerID="cri-o://fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d" gracePeriod=30 Nov 21 13:55:53 crc kubenswrapper[5133]: I1121 13:55:53.455704 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" 
containerID="cri-o://34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" gracePeriod=30 Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.654580 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93 is running failed: container process not found" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.654607 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8 is running failed: container process not found" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.654725 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4 is running failed: container process not found" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.655216 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93 is running failed: container process not found" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.655468 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8 is running failed: container process not found" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.655489 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4 is running failed: container process not found" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.655812 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93 is running failed: container process not found" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.656056 5133 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="sbdb" Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.656049 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4 is running failed: container process not found" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.656123 5133 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.656076 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8 is running failed: container process not found" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 21 13:55:53 crc kubenswrapper[5133]: E1121 13:55:53.656205 5133 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="nbdb" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.165385 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/3.log" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.168778 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovn-acl-logging/0.log" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.169458 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovn-controller/0.log" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.170043 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.205832 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-var-lib-cni-networks-ovn-kubernetes\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.205930 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-ovn-kubernetes\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206029 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206061 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-slash\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206096 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206105 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-netns\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206170 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-etc-openvswitch\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206120 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-slash" (OuterVolumeSpecName: "host-slash") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206228 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-ovn\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206138 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206245 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206274 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-netd\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206341 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lr2l4\" (UniqueName: \"kubernetes.io/projected/373d5da7-fae9-4689-9ede-6e2d69a54c02-kube-api-access-lr2l4\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206372 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206396 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-config\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206419 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206528 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-env-overrides\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206562 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-log-socket\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206593 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-systemd-units\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206625 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-kubelet\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206658 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovn-node-metrics-cert\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206674 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-log-socket" (OuterVolumeSpecName: "log-socket") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206698 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-bin\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206710 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206726 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-systemd\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206737 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206754 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-openvswitch\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206754 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206811 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-node-log\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206852 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-script-lib\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206873 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-var-lib-openvswitch\") pod \"373d5da7-fae9-4689-9ede-6e2d69a54c02\" (UID: \"373d5da7-fae9-4689-9ede-6e2d69a54c02\") " Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206901 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-node-log" (OuterVolumeSpecName: "node-log") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.206938 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207279 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207307 5133 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207327 5133 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-node-log\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207342 5133 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207363 5133 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207377 5133 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-slash\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207390 5133 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207391 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207400 5133 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207449 5133 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207467 5133 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207480 5133 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-log-socket\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207397 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207493 5133 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207535 5133 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207554 5133 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.207418 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.213958 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.214486 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/373d5da7-fae9-4689-9ede-6e2d69a54c02-kube-api-access-lr2l4" (OuterVolumeSpecName: "kube-api-access-lr2l4") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "kube-api-access-lr2l4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.226277 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "373d5da7-fae9-4689-9ede-6e2d69a54c02" (UID: "373d5da7-fae9-4689-9ede-6e2d69a54c02"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232039 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tsfg6"] Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232304 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovn-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232331 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovn-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232350 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232361 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232369 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232378 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232389 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kube-rbac-proxy-ovn-metrics" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232396 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kube-rbac-proxy-ovn-metrics" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232406 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232412 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232423 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="nbdb" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232432 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="nbdb" Nov 21 13:55:54 crc 
kubenswrapper[5133]: E1121 13:55:54.232442 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="northd" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232450 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="northd" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232464 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovn-acl-logging" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232471 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovn-acl-logging" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232481 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="sbdb" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232488 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="sbdb" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232503 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kubecfg-setup" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232510 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kubecfg-setup" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232524 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kube-rbac-proxy-node" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232532 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kube-rbac-proxy-node" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232643 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kube-rbac-proxy-node" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232660 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="sbdb" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232674 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="kube-rbac-proxy-ovn-metrics" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232687 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232721 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232730 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovn-acl-logging" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232742 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="northd" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232753 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="nbdb" Nov 21 
13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232763 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232773 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232783 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovn-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232897 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232908 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.232918 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.232925 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.233059 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerName="ovnkube-controller" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.234904 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.307793 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-run-systemd\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.307837 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-run-netns\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.307858 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/956acd53-2f4c-40e5-8c85-afcf06bba86a-ovnkube-config\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308049 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-slash\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308109 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-run-openvswitch\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308149 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308226 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-cni-netd\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308267 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-node-log\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308330 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-cni-bin\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308353 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/956acd53-2f4c-40e5-8c85-afcf06bba86a-env-overrides\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308401 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-kubelet\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308434 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-var-lib-openvswitch\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308544 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-etc-openvswitch\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc 
kubenswrapper[5133]: I1121 13:55:54.308591 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-systemd-units\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308618 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qsdw\" (UniqueName: \"kubernetes.io/projected/956acd53-2f4c-40e5-8c85-afcf06bba86a-kube-api-access-4qsdw\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308658 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/956acd53-2f4c-40e5-8c85-afcf06bba86a-ovn-node-metrics-cert\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308711 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/956acd53-2f4c-40e5-8c85-afcf06bba86a-ovnkube-script-lib\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308832 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-run-ovn\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308863 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-run-ovn-kubernetes\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.308905 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-log-socket\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.309019 5133 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.309042 5133 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.309063 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lr2l4\" (UniqueName: 
\"kubernetes.io/projected/373d5da7-fae9-4689-9ede-6e2d69a54c02-kube-api-access-lr2l4\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.309077 5133 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.309094 5133 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/373d5da7-fae9-4689-9ede-6e2d69a54c02-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.309106 5133 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/373d5da7-fae9-4689-9ede-6e2d69a54c02-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.309118 5133 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/373d5da7-fae9-4689-9ede-6e2d69a54c02-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.390614 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m5d24_0077329a-abad-4c6d-a601-2dc01fd83184/kube-multus/2.log" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.391353 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m5d24_0077329a-abad-4c6d-a601-2dc01fd83184/kube-multus/1.log" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.391421 5133 generic.go:334] "Generic (PLEG): container finished" podID="0077329a-abad-4c6d-a601-2dc01fd83184" containerID="4e63463f8206d5113d6726032b5b42f65d84c6b36737cf2413454c4d8a340b2c" exitCode=2 Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.391530 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m5d24" event={"ID":"0077329a-abad-4c6d-a601-2dc01fd83184","Type":"ContainerDied","Data":"4e63463f8206d5113d6726032b5b42f65d84c6b36737cf2413454c4d8a340b2c"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.391588 5133 scope.go:117] "RemoveContainer" containerID="49d78ec51ff96fc1ff9c0e668751296475767d8bf09673ff5d0f1a9d896d6595" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.392512 5133 scope.go:117] "RemoveContainer" containerID="4e63463f8206d5113d6726032b5b42f65d84c6b36737cf2413454c4d8a340b2c" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.396926 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovnkube-controller/3.log" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.399855 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovn-acl-logging/0.log" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.400615 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-tjzm8_373d5da7-fae9-4689-9ede-6e2d69a54c02/ovn-controller/0.log" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401429 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" exitCode=0 Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401456 
5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" exitCode=0 Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401466 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" exitCode=0 Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401476 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6" exitCode=0 Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401485 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145" exitCode=0 Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401493 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1" exitCode=0 Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401503 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f" exitCode=143 Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401512 5133 generic.go:334] "Generic (PLEG): container finished" podID="373d5da7-fae9-4689-9ede-6e2d69a54c02" containerID="fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d" exitCode=143 Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401520 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401573 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401589 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401603 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401616 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.401845 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405130 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405190 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405207 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405215 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405222 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405228 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405235 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405242 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405249 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405255 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405262 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405272 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405288 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405295 5133 pod_container_deletor.go:114] "Failed 
to issue the request to remove container" containerID={"Type":"cri-o","ID":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405302 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405308 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405315 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405321 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405328 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405333 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405339 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405345 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405355 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405365 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405374 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405381 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405388 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405395 5133 pod_container_deletor.go:114] "Failed 
to issue the request to remove container" containerID={"Type":"cri-o","ID":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405400 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405408 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405415 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405421 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405428 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405437 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tjzm8" event={"ID":"373d5da7-fae9-4689-9ede-6e2d69a54c02","Type":"ContainerDied","Data":"05132343d4c1747126533aa575d4729775e3869e00592199528d1b445ef158ae"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405451 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405458 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405466 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405472 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405481 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405488 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405496 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405503 5133 pod_container_deletor.go:114] "Failed 
to issue the request to remove container" containerID={"Type":"cri-o","ID":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405509 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.405515 5133 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b"} Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411286 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-node-log\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411349 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-cni-bin\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411379 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/956acd53-2f4c-40e5-8c85-afcf06bba86a-env-overrides\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411403 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-kubelet\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411425 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-var-lib-openvswitch\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411479 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-etc-openvswitch\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411508 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-systemd-units\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411530 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qsdw\" (UniqueName: 
\"kubernetes.io/projected/956acd53-2f4c-40e5-8c85-afcf06bba86a-kube-api-access-4qsdw\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411558 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/956acd53-2f4c-40e5-8c85-afcf06bba86a-ovn-node-metrics-cert\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411595 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/956acd53-2f4c-40e5-8c85-afcf06bba86a-ovnkube-script-lib\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411637 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-run-ovn\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411661 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-run-ovn-kubernetes\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411703 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-log-socket\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411738 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-run-systemd\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411749 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-etc-openvswitch\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411762 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/956acd53-2f4c-40e5-8c85-afcf06bba86a-ovnkube-config\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411825 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-run-netns\") pod 
\"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411881 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-slash\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411906 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-run-openvswitch\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411935 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.411971 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-cni-netd\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.412141 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-cni-netd\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.412182 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-node-log\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.412214 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-cni-bin\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.412668 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/956acd53-2f4c-40e5-8c85-afcf06bba86a-ovnkube-config\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.412739 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-systemd-units\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.412743 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/956acd53-2f4c-40e5-8c85-afcf06bba86a-env-overrides\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.412773 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-kubelet\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.412817 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-var-lib-openvswitch\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.413115 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-run-netns\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.413156 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-slash\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.413187 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-run-openvswitch\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.413232 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.413266 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-host-run-ovn-kubernetes\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.413971 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/956acd53-2f4c-40e5-8c85-afcf06bba86a-ovnkube-script-lib\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: 
I1121 13:55:54.414039 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-run-ovn\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.414077 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-log-socket\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.414728 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/956acd53-2f4c-40e5-8c85-afcf06bba86a-run-systemd\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.418944 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/956acd53-2f4c-40e5-8c85-afcf06bba86a-ovn-node-metrics-cert\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.439769 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qsdw\" (UniqueName: \"kubernetes.io/projected/956acd53-2f4c-40e5-8c85-afcf06bba86a-kube-api-access-4qsdw\") pod \"ovnkube-node-tsfg6\" (UID: \"956acd53-2f4c-40e5-8c85-afcf06bba86a\") " pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.462180 5133 scope.go:117] "RemoveContainer" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.473063 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tjzm8"] Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.473118 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tjzm8"] Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.485980 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.512106 5133 scope.go:117] "RemoveContainer" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.552577 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.573876 5133 scope.go:117] "RemoveContainer" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.615230 5133 scope.go:117] "RemoveContainer" containerID="94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.644994 5133 scope.go:117] "RemoveContainer" containerID="5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.660859 5133 scope.go:117] "RemoveContainer" containerID="ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.680060 5133 scope.go:117] "RemoveContainer" containerID="563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.699468 5133 scope.go:117] "RemoveContainer" containerID="fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.715945 5133 scope.go:117] "RemoveContainer" containerID="a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.739823 5133 scope.go:117] "RemoveContainer" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.740392 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4\": container with ID starting with 34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4 not found: ID does not exist" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.740431 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} err="failed to get container status \"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4\": rpc error: code = NotFound desc = could not find container \"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4\": container with ID starting with 34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.740462 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.740831 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\": container with ID starting with 5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962 not found: ID does not exist" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.740857 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} err="failed to get container status \"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\": rpc 
error: code = NotFound desc = could not find container \"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\": container with ID starting with 5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.740872 5133 scope.go:117] "RemoveContainer" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.741158 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\": container with ID starting with 6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93 not found: ID does not exist" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.741184 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} err="failed to get container status \"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\": rpc error: code = NotFound desc = could not find container \"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\": container with ID starting with 6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.741201 5133 scope.go:117] "RemoveContainer" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.741739 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\": container with ID starting with c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8 not found: ID does not exist" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.741761 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} err="failed to get container status \"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\": rpc error: code = NotFound desc = could not find container \"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\": container with ID starting with c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.741775 5133 scope.go:117] "RemoveContainer" containerID="94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.742150 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\": container with ID starting with 94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6 not found: ID does not exist" containerID="94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.742178 5133 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} err="failed to get container status \"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\": rpc error: code = NotFound desc = could not find container \"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\": container with ID starting with 94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.742191 5133 scope.go:117] "RemoveContainer" containerID="5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.742461 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\": container with ID starting with 5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145 not found: ID does not exist" containerID="5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.742485 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} err="failed to get container status \"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\": rpc error: code = NotFound desc = could not find container \"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\": container with ID starting with 5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.742498 5133 scope.go:117] "RemoveContainer" containerID="ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.742768 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\": container with ID starting with ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1 not found: ID does not exist" containerID="ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.742792 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} err="failed to get container status \"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\": rpc error: code = NotFound desc = could not find container \"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\": container with ID starting with ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.742808 5133 scope.go:117] "RemoveContainer" containerID="563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.743190 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\": container with ID starting with 563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f not found: ID does not exist" 
containerID="563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.743213 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} err="failed to get container status \"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\": rpc error: code = NotFound desc = could not find container \"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\": container with ID starting with 563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.743228 5133 scope.go:117] "RemoveContainer" containerID="fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.743500 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\": container with ID starting with fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d not found: ID does not exist" containerID="fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.743524 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} err="failed to get container status \"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\": rpc error: code = NotFound desc = could not find container \"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\": container with ID starting with fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.743537 5133 scope.go:117] "RemoveContainer" containerID="a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b" Nov 21 13:55:54 crc kubenswrapper[5133]: E1121 13:55:54.743773 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\": container with ID starting with a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b not found: ID does not exist" containerID="a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.743801 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b"} err="failed to get container status \"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\": rpc error: code = NotFound desc = could not find container \"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\": container with ID starting with a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.743821 5133 scope.go:117] "RemoveContainer" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.744101 5133 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} err="failed to get container status \"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4\": rpc error: code = NotFound desc = could not find container \"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4\": container with ID starting with 34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.744127 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.744467 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} err="failed to get container status \"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\": rpc error: code = NotFound desc = could not find container \"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\": container with ID starting with 5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.744525 5133 scope.go:117] "RemoveContainer" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.744942 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} err="failed to get container status \"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\": rpc error: code = NotFound desc = could not find container \"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\": container with ID starting with 6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.745031 5133 scope.go:117] "RemoveContainer" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.745736 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} err="failed to get container status \"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\": rpc error: code = NotFound desc = could not find container \"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\": container with ID starting with c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.745770 5133 scope.go:117] "RemoveContainer" containerID="94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.746198 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} err="failed to get container status \"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\": rpc error: code = NotFound desc = could not find container \"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\": container with ID starting with 94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6 not found: ID does not exist" Nov 
21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.746226 5133 scope.go:117] "RemoveContainer" containerID="5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.746612 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} err="failed to get container status \"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\": rpc error: code = NotFound desc = could not find container \"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\": container with ID starting with 5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.746630 5133 scope.go:117] "RemoveContainer" containerID="ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.746982 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} err="failed to get container status \"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\": rpc error: code = NotFound desc = could not find container \"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\": container with ID starting with ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.747039 5133 scope.go:117] "RemoveContainer" containerID="563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.747339 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} err="failed to get container status \"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\": rpc error: code = NotFound desc = could not find container \"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\": container with ID starting with 563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.747362 5133 scope.go:117] "RemoveContainer" containerID="fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.747580 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} err="failed to get container status \"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\": rpc error: code = NotFound desc = could not find container \"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\": container with ID starting with fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.747597 5133 scope.go:117] "RemoveContainer" containerID="a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.747798 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b"} err="failed to get container status 
\"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\": rpc error: code = NotFound desc = could not find container \"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\": container with ID starting with a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.747820 5133 scope.go:117] "RemoveContainer" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.750317 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} err="failed to get container status \"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4\": rpc error: code = NotFound desc = could not find container \"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4\": container with ID starting with 34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.750362 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.750892 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} err="failed to get container status \"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\": rpc error: code = NotFound desc = could not find container \"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\": container with ID starting with 5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.750923 5133 scope.go:117] "RemoveContainer" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.751230 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} err="failed to get container status \"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\": rpc error: code = NotFound desc = could not find container \"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\": container with ID starting with 6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.751256 5133 scope.go:117] "RemoveContainer" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.751521 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} err="failed to get container status \"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\": rpc error: code = NotFound desc = could not find container \"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\": container with ID starting with c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.751552 5133 scope.go:117] "RemoveContainer" 
containerID="94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.752285 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} err="failed to get container status \"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\": rpc error: code = NotFound desc = could not find container \"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\": container with ID starting with 94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.752345 5133 scope.go:117] "RemoveContainer" containerID="5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.752702 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} err="failed to get container status \"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\": rpc error: code = NotFound desc = could not find container \"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\": container with ID starting with 5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.752731 5133 scope.go:117] "RemoveContainer" containerID="ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.753283 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} err="failed to get container status \"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\": rpc error: code = NotFound desc = could not find container \"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\": container with ID starting with ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.753304 5133 scope.go:117] "RemoveContainer" containerID="563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.753643 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} err="failed to get container status \"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\": rpc error: code = NotFound desc = could not find container \"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\": container with ID starting with 563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.753689 5133 scope.go:117] "RemoveContainer" containerID="fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.754057 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} err="failed to get container status \"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\": rpc error: code = NotFound desc = could not find 
container \"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\": container with ID starting with fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.754083 5133 scope.go:117] "RemoveContainer" containerID="a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.754394 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b"} err="failed to get container status \"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\": rpc error: code = NotFound desc = could not find container \"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\": container with ID starting with a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.754428 5133 scope.go:117] "RemoveContainer" containerID="34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.754702 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4"} err="failed to get container status \"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4\": rpc error: code = NotFound desc = could not find container \"34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4\": container with ID starting with 34f54716d64105c1d42d53b9d765693c455371991e82a8f605faf9074e0788f4 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.754723 5133 scope.go:117] "RemoveContainer" containerID="5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.755048 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962"} err="failed to get container status \"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\": rpc error: code = NotFound desc = could not find container \"5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962\": container with ID starting with 5ccbca4b83db30237624e807299bb17bb84a66216f27148373ea648a4c0cc962 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.755068 5133 scope.go:117] "RemoveContainer" containerID="6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.755329 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93"} err="failed to get container status \"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\": rpc error: code = NotFound desc = could not find container \"6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93\": container with ID starting with 6ab3fdf87c8fc052cd429333579ede0e857fcc8399de947f661b159e6a5f2a93 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.755348 5133 scope.go:117] "RemoveContainer" containerID="c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.755545 5133 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8"} err="failed to get container status \"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\": rpc error: code = NotFound desc = could not find container \"c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8\": container with ID starting with c53aca99f41348a8343f7a2a2afd9ca78e2e4ba6aae9bb06cdb3ed66c9d79aa8 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.755563 5133 scope.go:117] "RemoveContainer" containerID="94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.755762 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6"} err="failed to get container status \"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\": rpc error: code = NotFound desc = could not find container \"94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6\": container with ID starting with 94aee1dbbc6cd90fac255e86ddb27f159eba2e08dc6cc749a8eb351842330ee6 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.755780 5133 scope.go:117] "RemoveContainer" containerID="5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.756047 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145"} err="failed to get container status \"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\": rpc error: code = NotFound desc = could not find container \"5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145\": container with ID starting with 5b6bfce121246f367a034c172b839a31fe309cfc0f83db7ab4e48cb26d6a5145 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.756063 5133 scope.go:117] "RemoveContainer" containerID="ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.756316 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1"} err="failed to get container status \"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\": rpc error: code = NotFound desc = could not find container \"ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1\": container with ID starting with ce076f27563e648bcbfd183634e87e0e31cedc359d0df1edc6af448b2a18f1a1 not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.756335 5133 scope.go:117] "RemoveContainer" containerID="563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.756560 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f"} err="failed to get container status \"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\": rpc error: code = NotFound desc = could not find container \"563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f\": container with ID starting with 
563b9e061f37ddab57173a01efbf7bf025c470edccc47a03e7c5bb1e317a289f not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.756576 5133 scope.go:117] "RemoveContainer" containerID="fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.756795 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d"} err="failed to get container status \"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\": rpc error: code = NotFound desc = could not find container \"fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d\": container with ID starting with fd5003cc4327d8234259623232e844463af0efdc0b3e395fa3e2c30c714b872d not found: ID does not exist" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.756811 5133 scope.go:117] "RemoveContainer" containerID="a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b" Nov 21 13:55:54 crc kubenswrapper[5133]: I1121 13:55:54.757297 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b"} err="failed to get container status \"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\": rpc error: code = NotFound desc = could not find container \"a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b\": container with ID starting with a58a9e26584de72092f1c3127585fe0f6fafbd4af45b269f4f3c23997214963b not found: ID does not exist" Nov 21 13:55:55 crc kubenswrapper[5133]: I1121 13:55:55.407719 5133 generic.go:334] "Generic (PLEG): container finished" podID="956acd53-2f4c-40e5-8c85-afcf06bba86a" containerID="e5682909a95e65b6549579075d6136c9ba3164b15cdf89c5344a2735c2b01074" exitCode=0 Nov 21 13:55:55 crc kubenswrapper[5133]: I1121 13:55:55.407814 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerDied","Data":"e5682909a95e65b6549579075d6136c9ba3164b15cdf89c5344a2735c2b01074"} Nov 21 13:55:55 crc kubenswrapper[5133]: I1121 13:55:55.408255 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerStarted","Data":"3d720d749edb8fd8a66f9a60fcc3a2105e11c65860aed2f3c546b00685809b45"} Nov 21 13:55:55 crc kubenswrapper[5133]: I1121 13:55:55.412075 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-m5d24_0077329a-abad-4c6d-a601-2dc01fd83184/kube-multus/2.log" Nov 21 13:55:55 crc kubenswrapper[5133]: I1121 13:55:55.412190 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-m5d24" event={"ID":"0077329a-abad-4c6d-a601-2dc01fd83184","Type":"ContainerStarted","Data":"b63354f00d31d83d327619a58aa5d013e53e4844d9dcf1b6009286e1d8685baa"} Nov 21 13:55:56 crc kubenswrapper[5133]: I1121 13:55:56.423694 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerStarted","Data":"630df33505f0304b1eb6c97596134e6a53b487addfa5551203f53408ef245879"} Nov 21 13:55:56 crc kubenswrapper[5133]: I1121 13:55:56.424186 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" 
event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerStarted","Data":"2994e16a2feb78fd66d0edb01b67d49fde76f83c1ac5677b6009b0d7d8eaf1a2"} Nov 21 13:55:56 crc kubenswrapper[5133]: I1121 13:55:56.424272 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerStarted","Data":"9fc204990682c8e446934a00f730cf96903dda7ec86064f68c661ad2271d7b08"} Nov 21 13:55:56 crc kubenswrapper[5133]: I1121 13:55:56.424319 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerStarted","Data":"6c8334caaf380000458a58dd12abdea8b430fd6e56d662793e20c505a0ca7ed6"} Nov 21 13:55:56 crc kubenswrapper[5133]: I1121 13:55:56.424335 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerStarted","Data":"72d6e943a0fe0dba3f0e80752476241f4a6560b0965b8d8bb9e7fa0e3120ea55"} Nov 21 13:55:56 crc kubenswrapper[5133]: I1121 13:55:56.424349 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerStarted","Data":"634a892a78ba9927624277c9250fbd446c2683bbbd40b837643b2812b26c3358"} Nov 21 13:55:56 crc kubenswrapper[5133]: I1121 13:55:56.465459 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="373d5da7-fae9-4689-9ede-6e2d69a54c02" path="/var/lib/kubelet/pods/373d5da7-fae9-4689-9ede-6e2d69a54c02/volumes" Nov 21 13:55:58 crc kubenswrapper[5133]: I1121 13:55:58.536636 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-k9w95" Nov 21 13:55:59 crc kubenswrapper[5133]: I1121 13:55:59.474274 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerStarted","Data":"22c5fcab85177ef3a170793900f951e4746fa8a80e3c8c0b6304d5cee80e0ff8"} Nov 21 13:56:02 crc kubenswrapper[5133]: I1121 13:56:02.498211 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" event={"ID":"956acd53-2f4c-40e5-8c85-afcf06bba86a","Type":"ContainerStarted","Data":"b10b46df925c468e2ed9986669fb4d18068420220a7633074e58b45986523b72"} Nov 21 13:56:03 crc kubenswrapper[5133]: I1121 13:56:03.504245 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:56:03 crc kubenswrapper[5133]: I1121 13:56:03.505191 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:56:03 crc kubenswrapper[5133]: I1121 13:56:03.546281 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:56:03 crc kubenswrapper[5133]: I1121 13:56:03.548983 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" podStartSLOduration=9.548966582 podStartE2EDuration="9.548966582s" podCreationTimestamp="2025-11-21 13:55:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:56:03.546183667 +0000 UTC 
m=+823.344015925" watchObservedRunningTime="2025-11-21 13:56:03.548966582 +0000 UTC m=+823.346798830" Nov 21 13:56:04 crc kubenswrapper[5133]: I1121 13:56:04.512152 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:56:04 crc kubenswrapper[5133]: I1121 13:56:04.550920 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.757356 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-76jss"] Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.760594 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.774784 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-76jss"] Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.854592 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-utilities\") pod \"community-operators-76jss\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.854744 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-catalog-content\") pod \"community-operators-76jss\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.854822 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lfvt\" (UniqueName: \"kubernetes.io/projected/00342006-5e0c-4a68-9aab-e9355d0e224e-kube-api-access-2lfvt\") pod \"community-operators-76jss\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.956598 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-catalog-content\") pod \"community-operators-76jss\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.956684 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lfvt\" (UniqueName: \"kubernetes.io/projected/00342006-5e0c-4a68-9aab-e9355d0e224e-kube-api-access-2lfvt\") pod \"community-operators-76jss\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.956726 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-utilities\") pod \"community-operators-76jss\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.957406 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-utilities\") pod \"community-operators-76jss\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.957837 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-catalog-content\") pod \"community-operators-76jss\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:16 crc kubenswrapper[5133]: I1121 13:56:16.982436 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lfvt\" (UniqueName: \"kubernetes.io/projected/00342006-5e0c-4a68-9aab-e9355d0e224e-kube-api-access-2lfvt\") pod \"community-operators-76jss\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:17 crc kubenswrapper[5133]: I1121 13:56:17.079141 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:17 crc kubenswrapper[5133]: I1121 13:56:17.374838 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-76jss"] Nov 21 13:56:17 crc kubenswrapper[5133]: W1121 13:56:17.384293 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod00342006_5e0c_4a68_9aab_e9355d0e224e.slice/crio-ae0718742354d78c402d8c8dbaa4a02e791d19356336b65f6dcaa4145d0848ac WatchSource:0}: Error finding container ae0718742354d78c402d8c8dbaa4a02e791d19356336b65f6dcaa4145d0848ac: Status 404 returned error can't find the container with id ae0718742354d78c402d8c8dbaa4a02e791d19356336b65f6dcaa4145d0848ac Nov 21 13:56:17 crc kubenswrapper[5133]: I1121 13:56:17.601306 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76jss" event={"ID":"00342006-5e0c-4a68-9aab-e9355d0e224e","Type":"ContainerStarted","Data":"ae0718742354d78c402d8c8dbaa4a02e791d19356336b65f6dcaa4145d0848ac"} Nov 21 13:56:18 crc kubenswrapper[5133]: I1121 13:56:18.612220 5133 generic.go:334] "Generic (PLEG): container finished" podID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerID="0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3" exitCode=0 Nov 21 13:56:18 crc kubenswrapper[5133]: I1121 13:56:18.612285 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76jss" event={"ID":"00342006-5e0c-4a68-9aab-e9355d0e224e","Type":"ContainerDied","Data":"0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3"} Nov 21 13:56:20 crc kubenswrapper[5133]: I1121 13:56:20.629552 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76jss" event={"ID":"00342006-5e0c-4a68-9aab-e9355d0e224e","Type":"ContainerStarted","Data":"2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20"} Nov 21 13:56:21 crc kubenswrapper[5133]: I1121 13:56:21.642254 5133 generic.go:334] "Generic (PLEG): container finished" podID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerID="2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20" exitCode=0 Nov 21 13:56:21 crc kubenswrapper[5133]: I1121 13:56:21.642332 5133 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/community-operators-76jss" event={"ID":"00342006-5e0c-4a68-9aab-e9355d0e224e","Type":"ContainerDied","Data":"2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20"} Nov 21 13:56:22 crc kubenswrapper[5133]: I1121 13:56:22.652361 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76jss" event={"ID":"00342006-5e0c-4a68-9aab-e9355d0e224e","Type":"ContainerStarted","Data":"43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f"} Nov 21 13:56:22 crc kubenswrapper[5133]: I1121 13:56:22.683531 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-76jss" podStartSLOduration=3.1943560460000002 podStartE2EDuration="6.683498856s" podCreationTimestamp="2025-11-21 13:56:16 +0000 UTC" firstStartedPulling="2025-11-21 13:56:18.615151307 +0000 UTC m=+838.412983595" lastFinishedPulling="2025-11-21 13:56:22.104294157 +0000 UTC m=+841.902126405" observedRunningTime="2025-11-21 13:56:22.679495278 +0000 UTC m=+842.477327526" watchObservedRunningTime="2025-11-21 13:56:22.683498856 +0000 UTC m=+842.481331104" Nov 21 13:56:23 crc kubenswrapper[5133]: I1121 13:56:23.310956 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:56:23 crc kubenswrapper[5133]: I1121 13:56:23.311078 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:56:24 crc kubenswrapper[5133]: I1121 13:56:24.583564 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tsfg6" Nov 21 13:56:27 crc kubenswrapper[5133]: I1121 13:56:27.079392 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:27 crc kubenswrapper[5133]: I1121 13:56:27.080091 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:27 crc kubenswrapper[5133]: I1121 13:56:27.138471 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:27 crc kubenswrapper[5133]: I1121 13:56:27.738986 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:27 crc kubenswrapper[5133]: I1121 13:56:27.815228 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-76jss"] Nov 21 13:56:29 crc kubenswrapper[5133]: I1121 13:56:29.696380 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-76jss" podUID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerName="registry-server" containerID="cri-o://43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f" gracePeriod=2 Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.629826 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.677774 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-catalog-content\") pod \"00342006-5e0c-4a68-9aab-e9355d0e224e\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.677907 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lfvt\" (UniqueName: \"kubernetes.io/projected/00342006-5e0c-4a68-9aab-e9355d0e224e-kube-api-access-2lfvt\") pod \"00342006-5e0c-4a68-9aab-e9355d0e224e\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.678026 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-utilities\") pod \"00342006-5e0c-4a68-9aab-e9355d0e224e\" (UID: \"00342006-5e0c-4a68-9aab-e9355d0e224e\") " Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.679314 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-utilities" (OuterVolumeSpecName: "utilities") pod "00342006-5e0c-4a68-9aab-e9355d0e224e" (UID: "00342006-5e0c-4a68-9aab-e9355d0e224e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.684114 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00342006-5e0c-4a68-9aab-e9355d0e224e-kube-api-access-2lfvt" (OuterVolumeSpecName: "kube-api-access-2lfvt") pod "00342006-5e0c-4a68-9aab-e9355d0e224e" (UID: "00342006-5e0c-4a68-9aab-e9355d0e224e"). InnerVolumeSpecName "kube-api-access-2lfvt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.706302 5133 generic.go:334] "Generic (PLEG): container finished" podID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerID="43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f" exitCode=0 Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.706348 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76jss" event={"ID":"00342006-5e0c-4a68-9aab-e9355d0e224e","Type":"ContainerDied","Data":"43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f"} Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.706376 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-76jss" event={"ID":"00342006-5e0c-4a68-9aab-e9355d0e224e","Type":"ContainerDied","Data":"ae0718742354d78c402d8c8dbaa4a02e791d19356336b65f6dcaa4145d0848ac"} Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.706394 5133 scope.go:117] "RemoveContainer" containerID="43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.706503 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-76jss" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.728143 5133 scope.go:117] "RemoveContainer" containerID="2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.747518 5133 scope.go:117] "RemoveContainer" containerID="0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.749112 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00342006-5e0c-4a68-9aab-e9355d0e224e" (UID: "00342006-5e0c-4a68-9aab-e9355d0e224e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.764749 5133 scope.go:117] "RemoveContainer" containerID="43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f" Nov 21 13:56:30 crc kubenswrapper[5133]: E1121 13:56:30.765461 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f\": container with ID starting with 43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f not found: ID does not exist" containerID="43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.765507 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f"} err="failed to get container status \"43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f\": rpc error: code = NotFound desc = could not find container \"43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f\": container with ID starting with 43cd76c612e9c1513973779432d594d08458a67a4871e0bb0c4b278a258f5d4f not found: ID does not exist" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.765534 5133 scope.go:117] "RemoveContainer" containerID="2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20" Nov 21 13:56:30 crc kubenswrapper[5133]: E1121 13:56:30.766029 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20\": container with ID starting with 2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20 not found: ID does not exist" containerID="2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.766146 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20"} err="failed to get container status \"2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20\": rpc error: code = NotFound desc = could not find container \"2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20\": container with ID starting with 2d2549f5e5d1a10b1a95a230d5331d5cf7ffa07d18c2029ea78758bec0700c20 not found: ID does not exist" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.766185 5133 scope.go:117] "RemoveContainer" containerID="0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3" Nov 21 13:56:30 crc 
kubenswrapper[5133]: E1121 13:56:30.766830 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3\": container with ID starting with 0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3 not found: ID does not exist" containerID="0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.766862 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3"} err="failed to get container status \"0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3\": rpc error: code = NotFound desc = could not find container \"0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3\": container with ID starting with 0ff1e4a6e37f42960262aac900058d2c1d8287eeedff7468490d8ae3b90351e3 not found: ID does not exist" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.779298 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.779354 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lfvt\" (UniqueName: \"kubernetes.io/projected/00342006-5e0c-4a68-9aab-e9355d0e224e-kube-api-access-2lfvt\") on node \"crc\" DevicePath \"\"" Nov 21 13:56:30 crc kubenswrapper[5133]: I1121 13:56:30.779368 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00342006-5e0c-4a68-9aab-e9355d0e224e-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:56:31 crc kubenswrapper[5133]: I1121 13:56:31.062238 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-76jss"] Nov 21 13:56:31 crc kubenswrapper[5133]: I1121 13:56:31.069934 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-76jss"] Nov 21 13:56:32 crc kubenswrapper[5133]: I1121 13:56:32.466403 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00342006-5e0c-4a68-9aab-e9355d0e224e" path="/var/lib/kubelet/pods/00342006-5e0c-4a68-9aab-e9355d0e224e/volumes" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.030358 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57"] Nov 21 13:56:49 crc kubenswrapper[5133]: E1121 13:56:49.031357 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerName="registry-server" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.031374 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerName="registry-server" Nov 21 13:56:49 crc kubenswrapper[5133]: E1121 13:56:49.031388 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerName="extract-utilities" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.031396 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerName="extract-utilities" Nov 21 13:56:49 crc kubenswrapper[5133]: E1121 13:56:49.031404 5133 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerName="extract-content" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.031410 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerName="extract-content" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.031517 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="00342006-5e0c-4a68-9aab-e9355d0e224e" containerName="registry-server" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.032284 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.037562 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.072130 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57"] Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.139409 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.139494 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.139549 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm4zm\" (UniqueName: \"kubernetes.io/projected/2664fda7-42fc-42a7-9c72-ccd5ce36a862-kube-api-access-xm4zm\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.241582 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm4zm\" (UniqueName: \"kubernetes.io/projected/2664fda7-42fc-42a7-9c72-ccd5ce36a862-kube-api-access-xm4zm\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.241671 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc 
kubenswrapper[5133]: I1121 13:56:49.241712 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.242410 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.242545 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.264864 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm4zm\" (UniqueName: \"kubernetes.io/projected/2664fda7-42fc-42a7-9c72-ccd5ce36a862-kube-api-access-xm4zm\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.369161 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.632490 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57"] Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.850056 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" event={"ID":"2664fda7-42fc-42a7-9c72-ccd5ce36a862","Type":"ContainerStarted","Data":"52ab966acc186b55b6fc425c319df844cb81ca60baf04636ccb9290fffd4cd87"} Nov 21 13:56:49 crc kubenswrapper[5133]: I1121 13:56:49.850627 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" event={"ID":"2664fda7-42fc-42a7-9c72-ccd5ce36a862","Type":"ContainerStarted","Data":"57438773a6573a4a15c3f37adf30577b34bef7f74df08aef981a6b178acbb781"} Nov 21 13:56:50 crc kubenswrapper[5133]: I1121 13:56:50.859976 5133 generic.go:334] "Generic (PLEG): container finished" podID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerID="52ab966acc186b55b6fc425c319df844cb81ca60baf04636ccb9290fffd4cd87" exitCode=0 Nov 21 13:56:50 crc kubenswrapper[5133]: I1121 13:56:50.860147 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" event={"ID":"2664fda7-42fc-42a7-9c72-ccd5ce36a862","Type":"ContainerDied","Data":"52ab966acc186b55b6fc425c319df844cb81ca60baf04636ccb9290fffd4cd87"} Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.368278 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gsn76"] Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.370234 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.388278 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gsn76"] Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.474898 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-utilities\") pod \"redhat-operators-gsn76\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.474972 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-catalog-content\") pod \"redhat-operators-gsn76\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.475164 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7bp7\" (UniqueName: \"kubernetes.io/projected/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-kube-api-access-k7bp7\") pod \"redhat-operators-gsn76\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.576600 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-utilities\") pod \"redhat-operators-gsn76\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.576669 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-catalog-content\") pod \"redhat-operators-gsn76\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.576693 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7bp7\" (UniqueName: \"kubernetes.io/projected/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-kube-api-access-k7bp7\") pod \"redhat-operators-gsn76\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.577236 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-utilities\") pod \"redhat-operators-gsn76\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.577322 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-catalog-content\") pod \"redhat-operators-gsn76\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.605854 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-k7bp7\" (UniqueName: \"kubernetes.io/projected/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-kube-api-access-k7bp7\") pod \"redhat-operators-gsn76\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:51 crc kubenswrapper[5133]: I1121 13:56:51.702356 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:56:52 crc kubenswrapper[5133]: I1121 13:56:52.141310 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gsn76"] Nov 21 13:56:52 crc kubenswrapper[5133]: W1121 13:56:52.145513 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7727fb74_5466_4e5e_bdaa_1c4afd8a39bb.slice/crio-0e8229ce977b959ad38d5d79beaf07e5d9e662a6c6d5cb58fac740a9b2a348cd WatchSource:0}: Error finding container 0e8229ce977b959ad38d5d79beaf07e5d9e662a6c6d5cb58fac740a9b2a348cd: Status 404 returned error can't find the container with id 0e8229ce977b959ad38d5d79beaf07e5d9e662a6c6d5cb58fac740a9b2a348cd Nov 21 13:56:52 crc kubenswrapper[5133]: I1121 13:56:52.885677 5133 generic.go:334] "Generic (PLEG): container finished" podID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerID="ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2" exitCode=0 Nov 21 13:56:52 crc kubenswrapper[5133]: I1121 13:56:52.886213 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gsn76" event={"ID":"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb","Type":"ContainerDied","Data":"ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2"} Nov 21 13:56:52 crc kubenswrapper[5133]: I1121 13:56:52.886276 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gsn76" event={"ID":"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb","Type":"ContainerStarted","Data":"0e8229ce977b959ad38d5d79beaf07e5d9e662a6c6d5cb58fac740a9b2a348cd"} Nov 21 13:56:52 crc kubenswrapper[5133]: I1121 13:56:52.888539 5133 generic.go:334] "Generic (PLEG): container finished" podID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerID="8a01e079637c75dd0e6b575c1a8609dac6a9742948c21b373b29ac2f7bc074e2" exitCode=0 Nov 21 13:56:52 crc kubenswrapper[5133]: I1121 13:56:52.888691 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" event={"ID":"2664fda7-42fc-42a7-9c72-ccd5ce36a862","Type":"ContainerDied","Data":"8a01e079637c75dd0e6b575c1a8609dac6a9742948c21b373b29ac2f7bc074e2"} Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.311526 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.311598 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.311652 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.312230 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8270d91a0dc8e867dadddbaf1606a312bc821670bb959c0005ca6906c1d15307"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.312307 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://8270d91a0dc8e867dadddbaf1606a312bc821670bb959c0005ca6906c1d15307" gracePeriod=600 Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.904096 5133 generic.go:334] "Generic (PLEG): container finished" podID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerID="7570a44daf0ae827bd980f9fc37860090fb65ff7e6d96baf64d432285a4de6a8" exitCode=0 Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.904192 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" event={"ID":"2664fda7-42fc-42a7-9c72-ccd5ce36a862","Type":"ContainerDied","Data":"7570a44daf0ae827bd980f9fc37860090fb65ff7e6d96baf64d432285a4de6a8"} Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.907756 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="8270d91a0dc8e867dadddbaf1606a312bc821670bb959c0005ca6906c1d15307" exitCode=0 Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.907825 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"8270d91a0dc8e867dadddbaf1606a312bc821670bb959c0005ca6906c1d15307"} Nov 21 13:56:53 crc kubenswrapper[5133]: I1121 13:56:53.908123 5133 scope.go:117] "RemoveContainer" containerID="603056172cb84b83a56a09f7ae6762f8e617df9b4803981cd538b0aaab00d221" Nov 21 13:56:54 crc kubenswrapper[5133]: I1121 13:56:54.915119 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"aeb58eef34dec1c617396ba99663df9426cb6865e9e12764d8c5ffc72116a175"} Nov 21 13:56:54 crc kubenswrapper[5133]: I1121 13:56:54.916827 5133 generic.go:334] "Generic (PLEG): container finished" podID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerID="b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba" exitCode=0 Nov 21 13:56:54 crc kubenswrapper[5133]: I1121 13:56:54.917063 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gsn76" event={"ID":"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb","Type":"ContainerDied","Data":"b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba"} Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.249056 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.429052 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xm4zm\" (UniqueName: \"kubernetes.io/projected/2664fda7-42fc-42a7-9c72-ccd5ce36a862-kube-api-access-xm4zm\") pod \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.429126 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-bundle\") pod \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.429235 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-util\") pod \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\" (UID: \"2664fda7-42fc-42a7-9c72-ccd5ce36a862\") " Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.429681 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-bundle" (OuterVolumeSpecName: "bundle") pod "2664fda7-42fc-42a7-9c72-ccd5ce36a862" (UID: "2664fda7-42fc-42a7-9c72-ccd5ce36a862"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.436030 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2664fda7-42fc-42a7-9c72-ccd5ce36a862-kube-api-access-xm4zm" (OuterVolumeSpecName: "kube-api-access-xm4zm") pod "2664fda7-42fc-42a7-9c72-ccd5ce36a862" (UID: "2664fda7-42fc-42a7-9c72-ccd5ce36a862"). InnerVolumeSpecName "kube-api-access-xm4zm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.530418 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xm4zm\" (UniqueName: \"kubernetes.io/projected/2664fda7-42fc-42a7-9c72-ccd5ce36a862-kube-api-access-xm4zm\") on node \"crc\" DevicePath \"\"" Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.530469 5133 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.927511 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.927731 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57" event={"ID":"2664fda7-42fc-42a7-9c72-ccd5ce36a862","Type":"ContainerDied","Data":"57438773a6573a4a15c3f37adf30577b34bef7f74df08aef981a6b178acbb781"} Nov 21 13:56:55 crc kubenswrapper[5133]: I1121 13:56:55.927762 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="57438773a6573a4a15c3f37adf30577b34bef7f74df08aef981a6b178acbb781" Nov 21 13:56:56 crc kubenswrapper[5133]: I1121 13:56:56.011355 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-util" (OuterVolumeSpecName: "util") pod "2664fda7-42fc-42a7-9c72-ccd5ce36a862" (UID: "2664fda7-42fc-42a7-9c72-ccd5ce36a862"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:56:56 crc kubenswrapper[5133]: I1121 13:56:56.042470 5133 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2664fda7-42fc-42a7-9c72-ccd5ce36a862-util\") on node \"crc\" DevicePath \"\"" Nov 21 13:56:57 crc kubenswrapper[5133]: I1121 13:56:57.954236 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gsn76" event={"ID":"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb","Type":"ContainerStarted","Data":"32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834"} Nov 21 13:56:57 crc kubenswrapper[5133]: I1121 13:56:57.983196 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gsn76" podStartSLOduration=3.469869733 podStartE2EDuration="6.983171025s" podCreationTimestamp="2025-11-21 13:56:51 +0000 UTC" firstStartedPulling="2025-11-21 13:56:52.888169374 +0000 UTC m=+872.686001622" lastFinishedPulling="2025-11-21 13:56:56.401470666 +0000 UTC m=+876.199302914" observedRunningTime="2025-11-21 13:56:57.97813634 +0000 UTC m=+877.775968598" watchObservedRunningTime="2025-11-21 13:56:57.983171025 +0000 UTC m=+877.781003273" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.727463 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-cwv52"] Nov 21 13:56:58 crc kubenswrapper[5133]: E1121 13:56:58.727770 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerName="pull" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.727787 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerName="pull" Nov 21 13:56:58 crc kubenswrapper[5133]: E1121 13:56:58.727805 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerName="util" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.727812 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerName="util" Nov 21 13:56:58 crc kubenswrapper[5133]: E1121 13:56:58.727830 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerName="extract" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.727838 5133 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerName="extract" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.727934 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="2664fda7-42fc-42a7-9c72-ccd5ce36a862" containerName="extract" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.728414 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.730196 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.730376 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-v4r75" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.731167 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.740797 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-cwv52"] Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.783667 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvb9r\" (UniqueName: \"kubernetes.io/projected/d242e3ae-cf32-4eea-8360-b0f6fab0d5af-kube-api-access-mvb9r\") pod \"nmstate-operator-557fdffb88-cwv52\" (UID: \"d242e3ae-cf32-4eea-8360-b0f6fab0d5af\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.884708 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvb9r\" (UniqueName: \"kubernetes.io/projected/d242e3ae-cf32-4eea-8360-b0f6fab0d5af-kube-api-access-mvb9r\") pod \"nmstate-operator-557fdffb88-cwv52\" (UID: \"d242e3ae-cf32-4eea-8360-b0f6fab0d5af\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" Nov 21 13:56:58 crc kubenswrapper[5133]: I1121 13:56:58.906802 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvb9r\" (UniqueName: \"kubernetes.io/projected/d242e3ae-cf32-4eea-8360-b0f6fab0d5af-kube-api-access-mvb9r\") pod \"nmstate-operator-557fdffb88-cwv52\" (UID: \"d242e3ae-cf32-4eea-8360-b0f6fab0d5af\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" Nov 21 13:56:59 crc kubenswrapper[5133]: I1121 13:56:59.078806 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" Nov 21 13:56:59 crc kubenswrapper[5133]: I1121 13:56:59.310463 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-cwv52"] Nov 21 13:56:59 crc kubenswrapper[5133]: I1121 13:56:59.965088 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" event={"ID":"d242e3ae-cf32-4eea-8360-b0f6fab0d5af","Type":"ContainerStarted","Data":"220ee58542b0b5d35e6dc8781659d0a40680c2fcccbb64c9aa8e5f081369d738"} Nov 21 13:57:01 crc kubenswrapper[5133]: I1121 13:57:01.703699 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:57:01 crc kubenswrapper[5133]: I1121 13:57:01.706145 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:57:02 crc kubenswrapper[5133]: I1121 13:57:02.758944 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gsn76" podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerName="registry-server" probeResult="failure" output=< Nov 21 13:57:02 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 13:57:02 crc kubenswrapper[5133]: > Nov 21 13:57:09 crc kubenswrapper[5133]: E1121 13:57:09.630594 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = copying system image from manifest list: determining manifest MIME type for docker://registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator@sha256:6fc6e3fbd56357fbcfc8fadb7e3ffd83a5462d5ba32e1b3c04c35734734d3759: reading manifest sha256:fe02c6516957dd4793efbd72177a9be904c498ec2cc792492d8e4655ce7e8e45 in registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator: received unexpected HTTP status: 504 Gateway Timeout" image="registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator@sha256:6fc6e3fbd56357fbcfc8fadb7e3ffd83a5462d5ba32e1b3c04c35734734d3759" Nov 21 13:57:09 crc kubenswrapper[5133]: E1121 13:57:09.631349 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:nmstate-operator,Image:registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator@sha256:6fc6e3fbd56357fbcfc8fadb7e3ffd83a5462d5ba32e1b3c04c35734734d3759,Command:[manager],Args:[--zap-time-encoding=iso8601],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:WATCH_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.annotations['olm.targetNamespaces'],},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:OPERATOR_NAME,Value:kubernetes-nmstate-operator,ValueFrom:nil,},EnvVar{Name:ENABLE_PROFILER,Value:False,ValueFrom:nil,},EnvVar{Name:PROFILER_PORT,Value:6060,ValueFrom:nil,},EnvVar{Name:RUN_OPERATOR,Value:,ValueFrom:nil,},EnvVar{Name:HANDLER_IMAGE,Value:registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8b42f29676503074095f2837b044f2e228eaff3b25ab9a4c7c6165cb5d4c6892,ValueFrom:nil,},EnvVar{Name:PLUGIN_IMAGE,Value:registry.redhat.io/openshift4/nmstate-console-plugin-rhel9@sha256:28f45d58dd63ed46060520ad7506e37abfe0796632c671ce937478089ec8df2f,ValueFrom:nil,},EnvVar{Name:HANDLER_IMAGE_PULL_POLICY,Value:Always,ValueFrom:nil,},EnvVar{Name:HANDLER_NAMESPACE,Value:openshift-nmstate,ValueFrom:nil,},EnvVar{Name:MONITORING_NAMESPACE,Value:openshift-monitoring,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:82f625207c3d7ff49293404606d2a2436b242b24ddda1e116c2b331de20e43e1,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:kubernetes-nmstate-operator.4.18.0-202511050949,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{60 -3} {} 60m DecimalSI},memory: {{31457280 0} {} 30Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mvb9r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000690000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nmstate-operator-557fdffb88-cwv52_openshift-nmstate(d242e3ae-cf32-4eea-8360-b0f6fab0d5af): ErrImagePull: copying system image from manifest list: determining manifest MIME type for docker://registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator@sha256:6fc6e3fbd56357fbcfc8fadb7e3ffd83a5462d5ba32e1b3c04c35734734d3759: reading manifest sha256:fe02c6516957dd4793efbd72177a9be904c498ec2cc792492d8e4655ce7e8e45 in registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator: received unexpected HTTP status: 504 Gateway Timeout" logger="UnhandledError" Nov 21 13:57:09 crc kubenswrapper[5133]: E1121 13:57:09.632652 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-operator\" with ErrImagePull: \"copying system image from manifest list: determining manifest MIME type for 
docker://registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator@sha256:6fc6e3fbd56357fbcfc8fadb7e3ffd83a5462d5ba32e1b3c04c35734734d3759: reading manifest sha256:fe02c6516957dd4793efbd72177a9be904c498ec2cc792492d8e4655ce7e8e45 in registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator: received unexpected HTTP status: 504 Gateway Timeout\"" pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" podUID="d242e3ae-cf32-4eea-8360-b0f6fab0d5af" Nov 21 13:57:10 crc kubenswrapper[5133]: E1121 13:57:10.028940 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/kubernetes-nmstate-rhel9-operator@sha256:6fc6e3fbd56357fbcfc8fadb7e3ffd83a5462d5ba32e1b3c04c35734734d3759\\\"\"" pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" podUID="d242e3ae-cf32-4eea-8360-b0f6fab0d5af" Nov 21 13:57:11 crc kubenswrapper[5133]: I1121 13:57:11.779346 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:57:11 crc kubenswrapper[5133]: I1121 13:57:11.819233 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:57:12 crc kubenswrapper[5133]: I1121 13:57:12.017031 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gsn76"] Nov 21 13:57:13 crc kubenswrapper[5133]: I1121 13:57:13.046320 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gsn76" podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerName="registry-server" containerID="cri-o://32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834" gracePeriod=2 Nov 21 13:57:13 crc kubenswrapper[5133]: I1121 13:57:13.963582 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.062536 5133 generic.go:334] "Generic (PLEG): container finished" podID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerID="32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834" exitCode=0 Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.062625 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gsn76" event={"ID":"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb","Type":"ContainerDied","Data":"32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834"} Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.062688 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gsn76" event={"ID":"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb","Type":"ContainerDied","Data":"0e8229ce977b959ad38d5d79beaf07e5d9e662a6c6d5cb58fac740a9b2a348cd"} Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.062711 5133 scope.go:117] "RemoveContainer" containerID="32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.062806 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gsn76" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.082588 5133 scope.go:117] "RemoveContainer" containerID="b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.098229 5133 scope.go:117] "RemoveContainer" containerID="ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.101430 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7bp7\" (UniqueName: \"kubernetes.io/projected/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-kube-api-access-k7bp7\") pod \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.101497 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-catalog-content\") pod \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.101579 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-utilities\") pod \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\" (UID: \"7727fb74-5466-4e5e-bdaa-1c4afd8a39bb\") " Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.102822 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-utilities" (OuterVolumeSpecName: "utilities") pod "7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" (UID: "7727fb74-5466-4e5e-bdaa-1c4afd8a39bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.109877 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-kube-api-access-k7bp7" (OuterVolumeSpecName: "kube-api-access-k7bp7") pod "7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" (UID: "7727fb74-5466-4e5e-bdaa-1c4afd8a39bb"). InnerVolumeSpecName "kube-api-access-k7bp7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.120199 5133 scope.go:117] "RemoveContainer" containerID="32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834" Nov 21 13:57:14 crc kubenswrapper[5133]: E1121 13:57:14.120895 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834\": container with ID starting with 32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834 not found: ID does not exist" containerID="32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.121032 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834"} err="failed to get container status \"32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834\": rpc error: code = NotFound desc = could not find container \"32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834\": container with ID starting with 32bb96d5fd209a957b1b1a1c4a5997d80591f4f316f84fe5dae85113e3c39834 not found: ID does not exist" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.121157 5133 scope.go:117] "RemoveContainer" containerID="b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba" Nov 21 13:57:14 crc kubenswrapper[5133]: E1121 13:57:14.121714 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba\": container with ID starting with b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba not found: ID does not exist" containerID="b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.121762 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba"} err="failed to get container status \"b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba\": rpc error: code = NotFound desc = could not find container \"b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba\": container with ID starting with b937b8e99ac8dc8e5605df4ffefb3e2f0823e29f25fdd100b7cd1ac0193130ba not found: ID does not exist" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.121802 5133 scope.go:117] "RemoveContainer" containerID="ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2" Nov 21 13:57:14 crc kubenswrapper[5133]: E1121 13:57:14.122223 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2\": container with ID starting with ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2 not found: ID does not exist" containerID="ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.122341 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2"} err="failed to get container status \"ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2\": rpc error: code = NotFound desc = could not 
find container \"ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2\": container with ID starting with ad6a079bf53472e6b896baf694f80ff902b4fb8447cb467cfa68f83b7ed11ed2 not found: ID does not exist" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.197231 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" (UID: "7727fb74-5466-4e5e-bdaa-1c4afd8a39bb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.203414 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7bp7\" (UniqueName: \"kubernetes.io/projected/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-kube-api-access-k7bp7\") on node \"crc\" DevicePath \"\"" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.203478 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.203521 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.397087 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gsn76"] Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.400479 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gsn76"] Nov 21 13:57:14 crc kubenswrapper[5133]: I1121 13:57:14.468292 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" path="/var/lib/kubelet/pods/7727fb74-5466-4e5e-bdaa-1c4afd8a39bb/volumes" Nov 21 13:57:27 crc kubenswrapper[5133]: I1121 13:57:27.164110 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" event={"ID":"d242e3ae-cf32-4eea-8360-b0f6fab0d5af","Type":"ContainerStarted","Data":"851df5f5156f646b9745340da64cab13c9627ed95ebca28690df5de494d652bc"} Nov 21 13:57:27 crc kubenswrapper[5133]: I1121 13:57:27.188893 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-cwv52" podStartSLOduration=2.397111286 podStartE2EDuration="29.188868993s" podCreationTimestamp="2025-11-21 13:56:58 +0000 UTC" firstStartedPulling="2025-11-21 13:56:59.322118788 +0000 UTC m=+879.119951036" lastFinishedPulling="2025-11-21 13:57:26.113876495 +0000 UTC m=+905.911708743" observedRunningTime="2025-11-21 13:57:27.186783017 +0000 UTC m=+906.984615265" watchObservedRunningTime="2025-11-21 13:57:27.188868993 +0000 UTC m=+906.986701251" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.292023 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq"] Nov 21 13:57:28 crc kubenswrapper[5133]: E1121 13:57:28.292339 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerName="extract-utilities" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.292358 5133 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerName="extract-utilities" Nov 21 13:57:28 crc kubenswrapper[5133]: E1121 13:57:28.292384 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerName="extract-content" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.292396 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerName="extract-content" Nov 21 13:57:28 crc kubenswrapper[5133]: E1121 13:57:28.292415 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerName="registry-server" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.292427 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerName="registry-server" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.292563 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7727fb74-5466-4e5e-bdaa-1c4afd8a39bb" containerName="registry-server" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.293337 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.296839 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-kf8jw" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.304334 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq"] Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.324174 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw6cp\" (UniqueName: \"kubernetes.io/projected/db154bb8-c484-469b-89f3-d84a11ff8eae-kube-api-access-tw6cp\") pod \"nmstate-metrics-5dcf9c57c5-txndq\" (UID: \"db154bb8-c484-469b-89f3-d84a11ff8eae\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.371326 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-b474p"] Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.373897 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.382884 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.387724 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-pb5nh"] Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.389035 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.404790 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-b474p"] Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.425537 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/0b59ced3-80c3-4991-8005-600e0d36c2b3-dbus-socket\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.425616 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/0b59ced3-80c3-4991-8005-600e0d36c2b3-ovs-socket\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.425660 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9frq\" (UniqueName: \"kubernetes.io/projected/b8d952ba-d2a8-46a8-b2de-b47a5ad448b7-kube-api-access-w9frq\") pod \"nmstate-webhook-6b89b748d8-b474p\" (UID: \"b8d952ba-d2a8-46a8-b2de-b47a5ad448b7\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.425695 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp6jw\" (UniqueName: \"kubernetes.io/projected/0b59ced3-80c3-4991-8005-600e0d36c2b3-kube-api-access-gp6jw\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.425724 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw6cp\" (UniqueName: \"kubernetes.io/projected/db154bb8-c484-469b-89f3-d84a11ff8eae-kube-api-access-tw6cp\") pod \"nmstate-metrics-5dcf9c57c5-txndq\" (UID: \"db154bb8-c484-469b-89f3-d84a11ff8eae\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.425758 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b8d952ba-d2a8-46a8-b2de-b47a5ad448b7-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-b474p\" (UID: \"b8d952ba-d2a8-46a8-b2de-b47a5ad448b7\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.425780 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/0b59ced3-80c3-4991-8005-600e0d36c2b3-nmstate-lock\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.449508 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw6cp\" (UniqueName: \"kubernetes.io/projected/db154bb8-c484-469b-89f3-d84a11ff8eae-kube-api-access-tw6cp\") pod \"nmstate-metrics-5dcf9c57c5-txndq\" (UID: \"db154bb8-c484-469b-89f3-d84a11ff8eae\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq" Nov 21 13:57:28 
crc kubenswrapper[5133]: I1121 13:57:28.491485 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n"] Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.496569 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.499420 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.499870 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-r96br" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.500060 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.506566 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n"] Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.526683 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp6jw\" (UniqueName: \"kubernetes.io/projected/0b59ced3-80c3-4991-8005-600e0d36c2b3-kube-api-access-gp6jw\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.526749 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b8d952ba-d2a8-46a8-b2de-b47a5ad448b7-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-b474p\" (UID: \"b8d952ba-d2a8-46a8-b2de-b47a5ad448b7\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.526771 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/0b59ced3-80c3-4991-8005-600e0d36c2b3-nmstate-lock\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.526801 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/0b59ced3-80c3-4991-8005-600e0d36c2b3-dbus-socket\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.526827 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d22f52b5-ba71-44ff-8877-63ab436f5683-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9vb4n\" (UID: \"d22f52b5-ba71-44ff-8877-63ab436f5683\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.526860 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8998\" (UniqueName: \"kubernetes.io/projected/d22f52b5-ba71-44ff-8877-63ab436f5683-kube-api-access-w8998\") pod \"nmstate-console-plugin-5874bd7bc5-9vb4n\" (UID: \"d22f52b5-ba71-44ff-8877-63ab436f5683\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: 
I1121 13:57:28.526903 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d22f52b5-ba71-44ff-8877-63ab436f5683-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-9vb4n\" (UID: \"d22f52b5-ba71-44ff-8877-63ab436f5683\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.526931 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/0b59ced3-80c3-4991-8005-600e0d36c2b3-ovs-socket\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.526953 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9frq\" (UniqueName: \"kubernetes.io/projected/b8d952ba-d2a8-46a8-b2de-b47a5ad448b7-kube-api-access-w9frq\") pod \"nmstate-webhook-6b89b748d8-b474p\" (UID: \"b8d952ba-d2a8-46a8-b2de-b47a5ad448b7\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.526975 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/0b59ced3-80c3-4991-8005-600e0d36c2b3-nmstate-lock\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: E1121 13:57:28.527040 5133 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 21 13:57:28 crc kubenswrapper[5133]: E1121 13:57:28.527205 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b8d952ba-d2a8-46a8-b2de-b47a5ad448b7-tls-key-pair podName:b8d952ba-d2a8-46a8-b2de-b47a5ad448b7 nodeName:}" failed. No retries permitted until 2025-11-21 13:57:29.027173979 +0000 UTC m=+908.825006227 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/b8d952ba-d2a8-46a8-b2de-b47a5ad448b7-tls-key-pair") pod "nmstate-webhook-6b89b748d8-b474p" (UID: "b8d952ba-d2a8-46a8-b2de-b47a5ad448b7") : secret "openshift-nmstate-webhook" not found Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.527276 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/0b59ced3-80c3-4991-8005-600e0d36c2b3-ovs-socket\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.527391 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/0b59ced3-80c3-4991-8005-600e0d36c2b3-dbus-socket\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.545105 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp6jw\" (UniqueName: \"kubernetes.io/projected/0b59ced3-80c3-4991-8005-600e0d36c2b3-kube-api-access-gp6jw\") pod \"nmstate-handler-pb5nh\" (UID: \"0b59ced3-80c3-4991-8005-600e0d36c2b3\") " pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.548310 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9frq\" (UniqueName: \"kubernetes.io/projected/b8d952ba-d2a8-46a8-b2de-b47a5ad448b7-kube-api-access-w9frq\") pod \"nmstate-webhook-6b89b748d8-b474p\" (UID: \"b8d952ba-d2a8-46a8-b2de-b47a5ad448b7\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.619906 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.627756 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d22f52b5-ba71-44ff-8877-63ab436f5683-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9vb4n\" (UID: \"d22f52b5-ba71-44ff-8877-63ab436f5683\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.628469 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8998\" (UniqueName: \"kubernetes.io/projected/d22f52b5-ba71-44ff-8877-63ab436f5683-kube-api-access-w8998\") pod \"nmstate-console-plugin-5874bd7bc5-9vb4n\" (UID: \"d22f52b5-ba71-44ff-8877-63ab436f5683\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.628510 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d22f52b5-ba71-44ff-8877-63ab436f5683-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-9vb4n\" (UID: \"d22f52b5-ba71-44ff-8877-63ab436f5683\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.629583 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d22f52b5-ba71-44ff-8877-63ab436f5683-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-9vb4n\" (UID: \"d22f52b5-ba71-44ff-8877-63ab436f5683\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.637460 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d22f52b5-ba71-44ff-8877-63ab436f5683-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9vb4n\" (UID: \"d22f52b5-ba71-44ff-8877-63ab436f5683\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.658694 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8998\" (UniqueName: \"kubernetes.io/projected/d22f52b5-ba71-44ff-8877-63ab436f5683-kube-api-access-w8998\") pod \"nmstate-console-plugin-5874bd7bc5-9vb4n\" (UID: \"d22f52b5-ba71-44ff-8877-63ab436f5683\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.703044 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-68845dcc8-dkf5n"] Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.704369 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.712798 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.729193 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-console-config\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.729261 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-service-ca\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.729285 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h22z6\" (UniqueName: \"kubernetes.io/projected/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-kube-api-access-h22z6\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.729308 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-console-oauth-config\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.729343 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-trusted-ca-bundle\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.729367 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-oauth-serving-cert\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.729401 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-console-serving-cert\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.731621 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-68845dcc8-dkf5n"] Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.819337 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.859205 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-trusted-ca-bundle\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.859794 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-oauth-serving-cert\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.859859 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-console-serving-cert\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.859900 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-console-config\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.859956 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-service-ca\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.859989 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h22z6\" (UniqueName: \"kubernetes.io/projected/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-kube-api-access-h22z6\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.860034 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-console-oauth-config\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.862690 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-trusted-ca-bundle\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.863389 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-service-ca\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " 
pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.864019 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-console-config\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.875864 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-oauth-serving-cert\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.880552 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-console-serving-cert\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.887249 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-console-oauth-config\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:28 crc kubenswrapper[5133]: I1121 13:57:28.900734 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h22z6\" (UniqueName: \"kubernetes.io/projected/b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4-kube-api-access-h22z6\") pod \"console-68845dcc8-dkf5n\" (UID: \"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4\") " pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.002711 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq"] Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.030573 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.062503 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b8d952ba-d2a8-46a8-b2de-b47a5ad448b7-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-b474p\" (UID: \"b8d952ba-d2a8-46a8-b2de-b47a5ad448b7\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.069702 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/b8d952ba-d2a8-46a8-b2de-b47a5ad448b7-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-b474p\" (UID: \"b8d952ba-d2a8-46a8-b2de-b47a5ad448b7\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.084827 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n"] Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.177152 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-pb5nh" event={"ID":"0b59ced3-80c3-4991-8005-600e0d36c2b3","Type":"ContainerStarted","Data":"e9e1ad52348781d52b55b74f85a333fb48f028b508510bd81575c11b1ca8cc4c"} Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.178680 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq" event={"ID":"db154bb8-c484-469b-89f3-d84a11ff8eae","Type":"ContainerStarted","Data":"6928b25af2af5e17713120d7c01e1433ddf39fbc9ee3c5cae1eb4ef86ba3e022"} Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.179631 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" event={"ID":"d22f52b5-ba71-44ff-8877-63ab436f5683","Type":"ContainerStarted","Data":"94126094fd2e8cd0cc3bcbb4820c46e69d80b5be99bd21ee8350d0a2fda3972e"} Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.235579 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-68845dcc8-dkf5n"] Nov 21 13:57:29 crc kubenswrapper[5133]: W1121 13:57:29.239386 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb35ea960_c9f1_4a0f_ab1a_4715a8a54ba4.slice/crio-bb6c23fd2012ffcb7f180c7b9583b5208316510e2f5f07bd12aec2ae1895bfd3 WatchSource:0}: Error finding container bb6c23fd2012ffcb7f180c7b9583b5208316510e2f5f07bd12aec2ae1895bfd3: Status 404 returned error can't find the container with id bb6c23fd2012ffcb7f180c7b9583b5208316510e2f5f07bd12aec2ae1895bfd3 Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.303698 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:29 crc kubenswrapper[5133]: I1121 13:57:29.509814 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-b474p"] Nov 21 13:57:30 crc kubenswrapper[5133]: I1121 13:57:30.186932 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" event={"ID":"b8d952ba-d2a8-46a8-b2de-b47a5ad448b7","Type":"ContainerStarted","Data":"95da0a8796ccfc7e9e59a11d917170ed77ddfdeead2a158a66c70029ef384bad"} Nov 21 13:57:30 crc kubenswrapper[5133]: I1121 13:57:30.189470 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68845dcc8-dkf5n" event={"ID":"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4","Type":"ContainerStarted","Data":"19d0f95b4bd7a15d573f2954816b7b90e4d2ca53aed8cd7429bbeca1c6451202"} Nov 21 13:57:30 crc kubenswrapper[5133]: I1121 13:57:30.189597 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-68845dcc8-dkf5n" event={"ID":"b35ea960-c9f1-4a0f-ab1a-4715a8a54ba4","Type":"ContainerStarted","Data":"bb6c23fd2012ffcb7f180c7b9583b5208316510e2f5f07bd12aec2ae1895bfd3"} Nov 21 13:57:30 crc kubenswrapper[5133]: I1121 13:57:30.213408 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-68845dcc8-dkf5n" podStartSLOduration=2.213381794 podStartE2EDuration="2.213381794s" podCreationTimestamp="2025-11-21 13:57:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:57:30.208898953 +0000 UTC m=+910.006731201" watchObservedRunningTime="2025-11-21 13:57:30.213381794 +0000 UTC m=+910.011214042" Nov 21 13:57:33 crc kubenswrapper[5133]: I1121 13:57:33.219428 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" event={"ID":"b8d952ba-d2a8-46a8-b2de-b47a5ad448b7","Type":"ContainerStarted","Data":"224eb4a9dade95722d41919f76ebc9e559e74cc8859c198d37f7da6377447bd0"} Nov 21 13:57:33 crc kubenswrapper[5133]: I1121 13:57:33.221967 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:33 crc kubenswrapper[5133]: I1121 13:57:33.223928 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-pb5nh" event={"ID":"0b59ced3-80c3-4991-8005-600e0d36c2b3","Type":"ContainerStarted","Data":"3131174568db425aa95adaac9a99277192dcf52e824296b2efb9a122a7d1ff50"} Nov 21 13:57:33 crc kubenswrapper[5133]: I1121 13:57:33.224087 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:33 crc kubenswrapper[5133]: I1121 13:57:33.227766 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq" event={"ID":"db154bb8-c484-469b-89f3-d84a11ff8eae","Type":"ContainerStarted","Data":"5333d966ccae77eaac4a4bec44be4ecc34532b1e81e5a6893b72d27e59f3f273"} Nov 21 13:57:33 crc kubenswrapper[5133]: I1121 13:57:33.230324 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" event={"ID":"d22f52b5-ba71-44ff-8877-63ab436f5683","Type":"ContainerStarted","Data":"9a30431e739b335c5843f2ae31ce587e955891dc50d899df191830fb610e15d7"} Nov 21 13:57:33 crc kubenswrapper[5133]: I1121 13:57:33.247233 5133 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" podStartSLOduration=2.749661328 podStartE2EDuration="5.247209854s" podCreationTimestamp="2025-11-21 13:57:28 +0000 UTC" firstStartedPulling="2025-11-21 13:57:29.536918974 +0000 UTC m=+909.334751222" lastFinishedPulling="2025-11-21 13:57:32.0344675 +0000 UTC m=+911.832299748" observedRunningTime="2025-11-21 13:57:33.24443329 +0000 UTC m=+913.042265608" watchObservedRunningTime="2025-11-21 13:57:33.247209854 +0000 UTC m=+913.045042102" Nov 21 13:57:33 crc kubenswrapper[5133]: I1121 13:57:33.271349 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-pb5nh" podStartSLOduration=1.9917612139999998 podStartE2EDuration="5.271300081s" podCreationTimestamp="2025-11-21 13:57:28 +0000 UTC" firstStartedPulling="2025-11-21 13:57:28.754836 +0000 UTC m=+908.552668248" lastFinishedPulling="2025-11-21 13:57:32.034374867 +0000 UTC m=+911.832207115" observedRunningTime="2025-11-21 13:57:33.268443304 +0000 UTC m=+913.066275572" watchObservedRunningTime="2025-11-21 13:57:33.271300081 +0000 UTC m=+913.069132329" Nov 21 13:57:33 crc kubenswrapper[5133]: I1121 13:57:33.292603 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9vb4n" podStartSLOduration=2.353986968 podStartE2EDuration="5.292574132s" podCreationTimestamp="2025-11-21 13:57:28 +0000 UTC" firstStartedPulling="2025-11-21 13:57:29.095786203 +0000 UTC m=+908.893618451" lastFinishedPulling="2025-11-21 13:57:32.034373367 +0000 UTC m=+911.832205615" observedRunningTime="2025-11-21 13:57:33.285346988 +0000 UTC m=+913.083179316" watchObservedRunningTime="2025-11-21 13:57:33.292574132 +0000 UTC m=+913.090406380" Nov 21 13:57:36 crc kubenswrapper[5133]: I1121 13:57:36.253762 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq" event={"ID":"db154bb8-c484-469b-89f3-d84a11ff8eae","Type":"ContainerStarted","Data":"ff258d4a93e6550ac0e0a8d502a4b488a61b75b9a139d705c9d4e07320ef0663"} Nov 21 13:57:36 crc kubenswrapper[5133]: I1121 13:57:36.277502 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-txndq" podStartSLOduration=2.040656508 podStartE2EDuration="8.277473531s" podCreationTimestamp="2025-11-21 13:57:28 +0000 UTC" firstStartedPulling="2025-11-21 13:57:29.014770718 +0000 UTC m=+908.812602956" lastFinishedPulling="2025-11-21 13:57:35.251587731 +0000 UTC m=+915.049419979" observedRunningTime="2025-11-21 13:57:36.27223955 +0000 UTC m=+916.070071828" watchObservedRunningTime="2025-11-21 13:57:36.277473531 +0000 UTC m=+916.075305779" Nov 21 13:57:38 crc kubenswrapper[5133]: I1121 13:57:38.740199 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-pb5nh" Nov 21 13:57:39 crc kubenswrapper[5133]: I1121 13:57:39.032339 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:39 crc kubenswrapper[5133]: I1121 13:57:39.032403 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:39 crc kubenswrapper[5133]: I1121 13:57:39.039550 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:39 crc 
kubenswrapper[5133]: I1121 13:57:39.279780 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-68845dcc8-dkf5n" Nov 21 13:57:39 crc kubenswrapper[5133]: I1121 13:57:39.365042 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m54n8"] Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.211144 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-22m5p"] Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.214503 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.227869 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-22m5p"] Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.364249 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-utilities\") pod \"certified-operators-22m5p\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.364344 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-catalog-content\") pod \"certified-operators-22m5p\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.364402 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkr5f\" (UniqueName: \"kubernetes.io/projected/c77c8d29-aca0-4c14-b092-f25dc85d1b87-kube-api-access-kkr5f\") pod \"certified-operators-22m5p\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.465360 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-catalog-content\") pod \"certified-operators-22m5p\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.465442 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkr5f\" (UniqueName: \"kubernetes.io/projected/c77c8d29-aca0-4c14-b092-f25dc85d1b87-kube-api-access-kkr5f\") pod \"certified-operators-22m5p\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.465494 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-utilities\") pod \"certified-operators-22m5p\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.466181 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-catalog-content\") pod \"certified-operators-22m5p\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.466232 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-utilities\") pod \"certified-operators-22m5p\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.495984 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkr5f\" (UniqueName: \"kubernetes.io/projected/c77c8d29-aca0-4c14-b092-f25dc85d1b87-kube-api-access-kkr5f\") pod \"certified-operators-22m5p\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:42 crc kubenswrapper[5133]: I1121 13:57:42.537704 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:43 crc kubenswrapper[5133]: I1121 13:57:43.073123 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-22m5p"] Nov 21 13:57:43 crc kubenswrapper[5133]: I1121 13:57:43.298454 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22m5p" event={"ID":"c77c8d29-aca0-4c14-b092-f25dc85d1b87","Type":"ContainerStarted","Data":"7dfd159c2bf5db30cb7ea0a9d8c7e31074b5a70db08f6eaad2f1f8201644a006"} Nov 21 13:57:43 crc kubenswrapper[5133]: I1121 13:57:43.299848 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22m5p" event={"ID":"c77c8d29-aca0-4c14-b092-f25dc85d1b87","Type":"ContainerStarted","Data":"d806cb9dba9eae975a318e0867bb4a5aff028e085cab5f0ac39a05011e525818"} Nov 21 13:57:44 crc kubenswrapper[5133]: I1121 13:57:44.308761 5133 generic.go:334] "Generic (PLEG): container finished" podID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerID="7dfd159c2bf5db30cb7ea0a9d8c7e31074b5a70db08f6eaad2f1f8201644a006" exitCode=0 Nov 21 13:57:44 crc kubenswrapper[5133]: I1121 13:57:44.308861 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22m5p" event={"ID":"c77c8d29-aca0-4c14-b092-f25dc85d1b87","Type":"ContainerDied","Data":"7dfd159c2bf5db30cb7ea0a9d8c7e31074b5a70db08f6eaad2f1f8201644a006"} Nov 21 13:57:46 crc kubenswrapper[5133]: I1121 13:57:46.327649 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22m5p" event={"ID":"c77c8d29-aca0-4c14-b092-f25dc85d1b87","Type":"ContainerStarted","Data":"a5e95c4196f061994c01e1a21ee43fa60df00820b803b2ea6a6bcfc4c8df76be"} Nov 21 13:57:47 crc kubenswrapper[5133]: I1121 13:57:47.337074 5133 generic.go:334] "Generic (PLEG): container finished" podID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerID="a5e95c4196f061994c01e1a21ee43fa60df00820b803b2ea6a6bcfc4c8df76be" exitCode=0 Nov 21 13:57:47 crc kubenswrapper[5133]: I1121 13:57:47.337123 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22m5p" event={"ID":"c77c8d29-aca0-4c14-b092-f25dc85d1b87","Type":"ContainerDied","Data":"a5e95c4196f061994c01e1a21ee43fa60df00820b803b2ea6a6bcfc4c8df76be"} Nov 21 13:57:48 crc kubenswrapper[5133]: I1121 13:57:48.349744 
5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22m5p" event={"ID":"c77c8d29-aca0-4c14-b092-f25dc85d1b87","Type":"ContainerStarted","Data":"42dcdbbde6171447d789034db0ae6058f6e6e245a2f1ee531a882f483dcc3372"} Nov 21 13:57:48 crc kubenswrapper[5133]: I1121 13:57:48.382458 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-22m5p" podStartSLOduration=2.8314670680000003 podStartE2EDuration="6.382429852s" podCreationTimestamp="2025-11-21 13:57:42 +0000 UTC" firstStartedPulling="2025-11-21 13:57:44.310965345 +0000 UTC m=+924.108797593" lastFinishedPulling="2025-11-21 13:57:47.861928139 +0000 UTC m=+927.659760377" observedRunningTime="2025-11-21 13:57:48.376096992 +0000 UTC m=+928.173929290" watchObservedRunningTime="2025-11-21 13:57:48.382429852 +0000 UTC m=+928.180262120" Nov 21 13:57:49 crc kubenswrapper[5133]: I1121 13:57:49.311581 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-b474p" Nov 21 13:57:52 crc kubenswrapper[5133]: I1121 13:57:52.538671 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:52 crc kubenswrapper[5133]: I1121 13:57:52.539183 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:52 crc kubenswrapper[5133]: I1121 13:57:52.603759 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:53 crc kubenswrapper[5133]: I1121 13:57:53.427111 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:53 crc kubenswrapper[5133]: I1121 13:57:53.500242 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-22m5p"] Nov 21 13:57:55 crc kubenswrapper[5133]: I1121 13:57:55.405345 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-22m5p" podUID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerName="registry-server" containerID="cri-o://42dcdbbde6171447d789034db0ae6058f6e6e245a2f1ee531a882f483dcc3372" gracePeriod=2 Nov 21 13:57:57 crc kubenswrapper[5133]: I1121 13:57:57.601270 5133 generic.go:334] "Generic (PLEG): container finished" podID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerID="42dcdbbde6171447d789034db0ae6058f6e6e245a2f1ee531a882f483dcc3372" exitCode=0 Nov 21 13:57:57 crc kubenswrapper[5133]: I1121 13:57:57.601370 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22m5p" event={"ID":"c77c8d29-aca0-4c14-b092-f25dc85d1b87","Type":"ContainerDied","Data":"42dcdbbde6171447d789034db0ae6058f6e6e245a2f1ee531a882f483dcc3372"} Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.026492 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.100827 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-utilities\") pod \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.100905 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-catalog-content\") pod \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.100952 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kkr5f\" (UniqueName: \"kubernetes.io/projected/c77c8d29-aca0-4c14-b092-f25dc85d1b87-kube-api-access-kkr5f\") pod \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\" (UID: \"c77c8d29-aca0-4c14-b092-f25dc85d1b87\") " Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.103385 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-utilities" (OuterVolumeSpecName: "utilities") pod "c77c8d29-aca0-4c14-b092-f25dc85d1b87" (UID: "c77c8d29-aca0-4c14-b092-f25dc85d1b87"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.114507 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c77c8d29-aca0-4c14-b092-f25dc85d1b87-kube-api-access-kkr5f" (OuterVolumeSpecName: "kube-api-access-kkr5f") pod "c77c8d29-aca0-4c14-b092-f25dc85d1b87" (UID: "c77c8d29-aca0-4c14-b092-f25dc85d1b87"). InnerVolumeSpecName "kube-api-access-kkr5f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.159526 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c77c8d29-aca0-4c14-b092-f25dc85d1b87" (UID: "c77c8d29-aca0-4c14-b092-f25dc85d1b87"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.202538 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.202567 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c77c8d29-aca0-4c14-b092-f25dc85d1b87-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.202581 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kkr5f\" (UniqueName: \"kubernetes.io/projected/c77c8d29-aca0-4c14-b092-f25dc85d1b87-kube-api-access-kkr5f\") on node \"crc\" DevicePath \"\"" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.614031 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22m5p" event={"ID":"c77c8d29-aca0-4c14-b092-f25dc85d1b87","Type":"ContainerDied","Data":"d806cb9dba9eae975a318e0867bb4a5aff028e085cab5f0ac39a05011e525818"} Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.614651 5133 scope.go:117] "RemoveContainer" containerID="42dcdbbde6171447d789034db0ae6058f6e6e245a2f1ee531a882f483dcc3372" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.614821 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-22m5p" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.639961 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-22m5p"] Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.644879 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-22m5p"] Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.654263 5133 scope.go:117] "RemoveContainer" containerID="a5e95c4196f061994c01e1a21ee43fa60df00820b803b2ea6a6bcfc4c8df76be" Nov 21 13:57:58 crc kubenswrapper[5133]: I1121 13:57:58.680666 5133 scope.go:117] "RemoveContainer" containerID="7dfd159c2bf5db30cb7ea0a9d8c7e31074b5a70db08f6eaad2f1f8201644a006" Nov 21 13:58:00 crc kubenswrapper[5133]: I1121 13:58:00.466529 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" path="/var/lib/kubelet/pods/c77c8d29-aca0-4c14-b092-f25dc85d1b87/volumes" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.416117 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-m54n8" podUID="d1ec861e-fbe3-412e-9885-43a9e3c5be1e" containerName="console" containerID="cri-o://4063bcbf9055b4a7f0f3a87184b54bdc72b5ced57e980761037f66eb5c77c9ae" gracePeriod=15 Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.663633 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m54n8_d1ec861e-fbe3-412e-9885-43a9e3c5be1e/console/0.log" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.664055 5133 generic.go:334] "Generic (PLEG): container finished" podID="d1ec861e-fbe3-412e-9885-43a9e3c5be1e" containerID="4063bcbf9055b4a7f0f3a87184b54bdc72b5ced57e980761037f66eb5c77c9ae" exitCode=2 Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.664094 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m54n8" 
event={"ID":"d1ec861e-fbe3-412e-9885-43a9e3c5be1e","Type":"ContainerDied","Data":"4063bcbf9055b4a7f0f3a87184b54bdc72b5ced57e980761037f66eb5c77c9ae"} Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.849163 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m54n8_d1ec861e-fbe3-412e-9885-43a9e3c5be1e/console/0.log" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.849271 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.897024 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-oauth-config\") pod \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.897519 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-config\") pod \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.897700 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-service-ca\") pod \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.897861 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-serving-cert\") pod \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.897978 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkl5p\" (UniqueName: \"kubernetes.io/projected/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-kube-api-access-jkl5p\") pod \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.898797 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-config" (OuterVolumeSpecName: "console-config") pod "d1ec861e-fbe3-412e-9885-43a9e3c5be1e" (UID: "d1ec861e-fbe3-412e-9885-43a9e3c5be1e"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.898824 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-service-ca" (OuterVolumeSpecName: "service-ca") pod "d1ec861e-fbe3-412e-9885-43a9e3c5be1e" (UID: "d1ec861e-fbe3-412e-9885-43a9e3c5be1e"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.898623 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-oauth-serving-cert\") pod \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.899742 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-trusted-ca-bundle\") pod \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\" (UID: \"d1ec861e-fbe3-412e-9885-43a9e3c5be1e\") " Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.899615 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "d1ec861e-fbe3-412e-9885-43a9e3c5be1e" (UID: "d1ec861e-fbe3-412e-9885-43a9e3c5be1e"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.900425 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "d1ec861e-fbe3-412e-9885-43a9e3c5be1e" (UID: "d1ec861e-fbe3-412e-9885-43a9e3c5be1e"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.901374 5133 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.901483 5133 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.901577 5133 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-service-ca\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.901649 5133 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.903262 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "d1ec861e-fbe3-412e-9885-43a9e3c5be1e" (UID: "d1ec861e-fbe3-412e-9885-43a9e3c5be1e"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.903547 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-kube-api-access-jkl5p" (OuterVolumeSpecName: "kube-api-access-jkl5p") pod "d1ec861e-fbe3-412e-9885-43a9e3c5be1e" (UID: "d1ec861e-fbe3-412e-9885-43a9e3c5be1e"). InnerVolumeSpecName "kube-api-access-jkl5p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:58:04 crc kubenswrapper[5133]: I1121 13:58:04.906271 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "d1ec861e-fbe3-412e-9885-43a9e3c5be1e" (UID: "d1ec861e-fbe3-412e-9885-43a9e3c5be1e"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 13:58:05 crc kubenswrapper[5133]: I1121 13:58:05.002476 5133 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:05 crc kubenswrapper[5133]: I1121 13:58:05.002935 5133 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:05 crc kubenswrapper[5133]: I1121 13:58:05.002953 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkl5p\" (UniqueName: \"kubernetes.io/projected/d1ec861e-fbe3-412e-9885-43a9e3c5be1e-kube-api-access-jkl5p\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:05 crc kubenswrapper[5133]: I1121 13:58:05.678767 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m54n8_d1ec861e-fbe3-412e-9885-43a9e3c5be1e/console/0.log" Nov 21 13:58:05 crc kubenswrapper[5133]: I1121 13:58:05.678842 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m54n8" event={"ID":"d1ec861e-fbe3-412e-9885-43a9e3c5be1e","Type":"ContainerDied","Data":"103a5c92bab38bd984c4b01da37ab5c1990891fe96f56c880c7d691de38cd0b7"} Nov 21 13:58:05 crc kubenswrapper[5133]: I1121 13:58:05.678897 5133 scope.go:117] "RemoveContainer" containerID="4063bcbf9055b4a7f0f3a87184b54bdc72b5ced57e980761037f66eb5c77c9ae" Nov 21 13:58:05 crc kubenswrapper[5133]: I1121 13:58:05.678938 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-m54n8" Nov 21 13:58:05 crc kubenswrapper[5133]: I1121 13:58:05.712836 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m54n8"] Nov 21 13:58:05 crc kubenswrapper[5133]: I1121 13:58:05.716454 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-m54n8"] Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.224716 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst"] Nov 21 13:58:06 crc kubenswrapper[5133]: E1121 13:58:06.225440 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerName="registry-server" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.225583 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerName="registry-server" Nov 21 13:58:06 crc kubenswrapper[5133]: E1121 13:58:06.225693 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1ec861e-fbe3-412e-9885-43a9e3c5be1e" containerName="console" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.225807 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1ec861e-fbe3-412e-9885-43a9e3c5be1e" containerName="console" Nov 21 13:58:06 crc kubenswrapper[5133]: E1121 13:58:06.226112 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerName="extract-utilities" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.226260 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerName="extract-utilities" Nov 21 13:58:06 crc kubenswrapper[5133]: E1121 13:58:06.226377 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerName="extract-content" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.226479 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerName="extract-content" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.226810 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c77c8d29-aca0-4c14-b092-f25dc85d1b87" containerName="registry-server" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.226943 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1ec861e-fbe3-412e-9885-43a9e3c5be1e" containerName="console" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.228571 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.236481 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst"] Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.243530 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.325795 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.325969 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.326021 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvsms\" (UniqueName: \"kubernetes.io/projected/ba9d8865-d355-4438-a9eb-b50c55da374b-kube-api-access-dvsms\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.428132 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.428340 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.428394 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvsms\" (UniqueName: \"kubernetes.io/projected/ba9d8865-d355-4438-a9eb-b50c55da374b-kube-api-access-dvsms\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.428848 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.428959 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.452269 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvsms\" (UniqueName: \"kubernetes.io/projected/ba9d8865-d355-4438-a9eb-b50c55da374b-kube-api-access-dvsms\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.464959 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1ec861e-fbe3-412e-9885-43a9e3c5be1e" path="/var/lib/kubelet/pods/d1ec861e-fbe3-412e-9885-43a9e3c5be1e/volumes" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.561521 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:06 crc kubenswrapper[5133]: I1121 13:58:06.796845 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst"] Nov 21 13:58:06 crc kubenswrapper[5133]: W1121 13:58:06.809307 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba9d8865_d355_4438_a9eb_b50c55da374b.slice/crio-554c086532a310f27e4b17c0cf3c0aa3648f2639debe8445733ff261a7f3d327 WatchSource:0}: Error finding container 554c086532a310f27e4b17c0cf3c0aa3648f2639debe8445733ff261a7f3d327: Status 404 returned error can't find the container with id 554c086532a310f27e4b17c0cf3c0aa3648f2639debe8445733ff261a7f3d327 Nov 21 13:58:07 crc kubenswrapper[5133]: I1121 13:58:07.698938 5133 generic.go:334] "Generic (PLEG): container finished" podID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerID="1939e1b50c10e05b8e14b7afe6cb094b59ab29ff6ec247f3dfa8de026eeae8c0" exitCode=0 Nov 21 13:58:07 crc kubenswrapper[5133]: I1121 13:58:07.699063 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" event={"ID":"ba9d8865-d355-4438-a9eb-b50c55da374b","Type":"ContainerDied","Data":"1939e1b50c10e05b8e14b7afe6cb094b59ab29ff6ec247f3dfa8de026eeae8c0"} Nov 21 13:58:07 crc kubenswrapper[5133]: I1121 13:58:07.699544 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" event={"ID":"ba9d8865-d355-4438-a9eb-b50c55da374b","Type":"ContainerStarted","Data":"554c086532a310f27e4b17c0cf3c0aa3648f2639debe8445733ff261a7f3d327"} Nov 21 13:58:10 crc kubenswrapper[5133]: I1121 13:58:10.719683 5133 generic.go:334] "Generic (PLEG): container finished" 
podID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerID="ab42fac695aa2fa057f0eae179ad7099458d3441ebac9944cce7d0f34d27af37" exitCode=0 Nov 21 13:58:10 crc kubenswrapper[5133]: I1121 13:58:10.719737 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" event={"ID":"ba9d8865-d355-4438-a9eb-b50c55da374b","Type":"ContainerDied","Data":"ab42fac695aa2fa057f0eae179ad7099458d3441ebac9944cce7d0f34d27af37"} Nov 21 13:58:11 crc kubenswrapper[5133]: I1121 13:58:11.729908 5133 generic.go:334] "Generic (PLEG): container finished" podID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerID="878c591663c7f7072af5be151f215b197f62c542358efc039a3c26ab8f19bc2b" exitCode=0 Nov 21 13:58:11 crc kubenswrapper[5133]: I1121 13:58:11.730059 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" event={"ID":"ba9d8865-d355-4438-a9eb-b50c55da374b","Type":"ContainerDied","Data":"878c591663c7f7072af5be151f215b197f62c542358efc039a3c26ab8f19bc2b"} Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.036188 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.146293 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-util\") pod \"ba9d8865-d355-4438-a9eb-b50c55da374b\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.146441 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvsms\" (UniqueName: \"kubernetes.io/projected/ba9d8865-d355-4438-a9eb-b50c55da374b-kube-api-access-dvsms\") pod \"ba9d8865-d355-4438-a9eb-b50c55da374b\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.146686 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-bundle\") pod \"ba9d8865-d355-4438-a9eb-b50c55da374b\" (UID: \"ba9d8865-d355-4438-a9eb-b50c55da374b\") " Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.148506 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-bundle" (OuterVolumeSpecName: "bundle") pod "ba9d8865-d355-4438-a9eb-b50c55da374b" (UID: "ba9d8865-d355-4438-a9eb-b50c55da374b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.159638 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba9d8865-d355-4438-a9eb-b50c55da374b-kube-api-access-dvsms" (OuterVolumeSpecName: "kube-api-access-dvsms") pod "ba9d8865-d355-4438-a9eb-b50c55da374b" (UID: "ba9d8865-d355-4438-a9eb-b50c55da374b"). InnerVolumeSpecName "kube-api-access-dvsms". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.160628 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-util" (OuterVolumeSpecName: "util") pod "ba9d8865-d355-4438-a9eb-b50c55da374b" (UID: "ba9d8865-d355-4438-a9eb-b50c55da374b"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.250534 5133 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-util\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.250886 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvsms\" (UniqueName: \"kubernetes.io/projected/ba9d8865-d355-4438-a9eb-b50c55da374b-kube-api-access-dvsms\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.250907 5133 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ba9d8865-d355-4438-a9eb-b50c55da374b-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.747327 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" event={"ID":"ba9d8865-d355-4438-a9eb-b50c55da374b","Type":"ContainerDied","Data":"554c086532a310f27e4b17c0cf3c0aa3648f2639debe8445733ff261a7f3d327"} Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.747386 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="554c086532a310f27e4b17c0cf3c0aa3648f2639debe8445733ff261a7f3d327" Nov 21 13:58:13 crc kubenswrapper[5133]: I1121 13:58:13.747422 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.318893 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr"] Nov 21 13:58:24 crc kubenswrapper[5133]: E1121 13:58:24.319870 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerName="util" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.319886 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerName="util" Nov 21 13:58:24 crc kubenswrapper[5133]: E1121 13:58:24.319900 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerName="pull" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.319906 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerName="pull" Nov 21 13:58:24 crc kubenswrapper[5133]: E1121 13:58:24.319918 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerName="extract" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.319924 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerName="extract" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.320062 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba9d8865-d355-4438-a9eb-b50c55da374b" containerName="extract" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.320502 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.322149 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.324684 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.324967 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-fvnm9" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.325106 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.327568 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.335949 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr"] Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.409245 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbb74\" (UniqueName: \"kubernetes.io/projected/fb96f30b-10d1-4d5a-a909-c718939fd900-kube-api-access-vbb74\") pod \"metallb-operator-controller-manager-5468dfbc5b-plbnr\" (UID: \"fb96f30b-10d1-4d5a-a909-c718939fd900\") " pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.409294 5133 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fb96f30b-10d1-4d5a-a909-c718939fd900-webhook-cert\") pod \"metallb-operator-controller-manager-5468dfbc5b-plbnr\" (UID: \"fb96f30b-10d1-4d5a-a909-c718939fd900\") " pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.409368 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fb96f30b-10d1-4d5a-a909-c718939fd900-apiservice-cert\") pod \"metallb-operator-controller-manager-5468dfbc5b-plbnr\" (UID: \"fb96f30b-10d1-4d5a-a909-c718939fd900\") " pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.510552 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbb74\" (UniqueName: \"kubernetes.io/projected/fb96f30b-10d1-4d5a-a909-c718939fd900-kube-api-access-vbb74\") pod \"metallb-operator-controller-manager-5468dfbc5b-plbnr\" (UID: \"fb96f30b-10d1-4d5a-a909-c718939fd900\") " pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.510612 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fb96f30b-10d1-4d5a-a909-c718939fd900-webhook-cert\") pod \"metallb-operator-controller-manager-5468dfbc5b-plbnr\" (UID: \"fb96f30b-10d1-4d5a-a909-c718939fd900\") " pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.510635 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fb96f30b-10d1-4d5a-a909-c718939fd900-apiservice-cert\") pod \"metallb-operator-controller-manager-5468dfbc5b-plbnr\" (UID: \"fb96f30b-10d1-4d5a-a909-c718939fd900\") " pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.518159 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fb96f30b-10d1-4d5a-a909-c718939fd900-webhook-cert\") pod \"metallb-operator-controller-manager-5468dfbc5b-plbnr\" (UID: \"fb96f30b-10d1-4d5a-a909-c718939fd900\") " pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.519798 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fb96f30b-10d1-4d5a-a909-c718939fd900-apiservice-cert\") pod \"metallb-operator-controller-manager-5468dfbc5b-plbnr\" (UID: \"fb96f30b-10d1-4d5a-a909-c718939fd900\") " pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.533461 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbb74\" (UniqueName: \"kubernetes.io/projected/fb96f30b-10d1-4d5a-a909-c718939fd900-kube-api-access-vbb74\") pod \"metallb-operator-controller-manager-5468dfbc5b-plbnr\" (UID: \"fb96f30b-10d1-4d5a-a909-c718939fd900\") " pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.566383 5133 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg"] Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.567116 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.574463 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.574540 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-78wpj" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.575403 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.585195 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg"] Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.611964 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/88d867e4-8f6e-407b-b65f-b87a47d2c578-webhook-cert\") pod \"metallb-operator-webhook-server-5d4d9648d8-8trqg\" (UID: \"88d867e4-8f6e-407b-b65f-b87a47d2c578\") " pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.612078 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/88d867e4-8f6e-407b-b65f-b87a47d2c578-apiservice-cert\") pod \"metallb-operator-webhook-server-5d4d9648d8-8trqg\" (UID: \"88d867e4-8f6e-407b-b65f-b87a47d2c578\") " pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.612169 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmv9p\" (UniqueName: \"kubernetes.io/projected/88d867e4-8f6e-407b-b65f-b87a47d2c578-kube-api-access-dmv9p\") pod \"metallb-operator-webhook-server-5d4d9648d8-8trqg\" (UID: \"88d867e4-8f6e-407b-b65f-b87a47d2c578\") " pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.640657 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.714035 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmv9p\" (UniqueName: \"kubernetes.io/projected/88d867e4-8f6e-407b-b65f-b87a47d2c578-kube-api-access-dmv9p\") pod \"metallb-operator-webhook-server-5d4d9648d8-8trqg\" (UID: \"88d867e4-8f6e-407b-b65f-b87a47d2c578\") " pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.714456 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/88d867e4-8f6e-407b-b65f-b87a47d2c578-webhook-cert\") pod \"metallb-operator-webhook-server-5d4d9648d8-8trqg\" (UID: \"88d867e4-8f6e-407b-b65f-b87a47d2c578\") " pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.714593 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/88d867e4-8f6e-407b-b65f-b87a47d2c578-apiservice-cert\") pod \"metallb-operator-webhook-server-5d4d9648d8-8trqg\" (UID: \"88d867e4-8f6e-407b-b65f-b87a47d2c578\") " pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.720653 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/88d867e4-8f6e-407b-b65f-b87a47d2c578-webhook-cert\") pod \"metallb-operator-webhook-server-5d4d9648d8-8trqg\" (UID: \"88d867e4-8f6e-407b-b65f-b87a47d2c578\") " pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.739048 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/88d867e4-8f6e-407b-b65f-b87a47d2c578-apiservice-cert\") pod \"metallb-operator-webhook-server-5d4d9648d8-8trqg\" (UID: \"88d867e4-8f6e-407b-b65f-b87a47d2c578\") " pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.751250 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmv9p\" (UniqueName: \"kubernetes.io/projected/88d867e4-8f6e-407b-b65f-b87a47d2c578-kube-api-access-dmv9p\") pod \"metallb-operator-webhook-server-5d4d9648d8-8trqg\" (UID: \"88d867e4-8f6e-407b-b65f-b87a47d2c578\") " pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.892152 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:24 crc kubenswrapper[5133]: I1121 13:58:24.952868 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr"] Nov 21 13:58:24 crc kubenswrapper[5133]: W1121 13:58:24.967040 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb96f30b_10d1_4d5a_a909_c718939fd900.slice/crio-39c92a7bd3c6166368962fc464994b140bb6bcd7e096080030e55f0653134846 WatchSource:0}: Error finding container 39c92a7bd3c6166368962fc464994b140bb6bcd7e096080030e55f0653134846: Status 404 returned error can't find the container with id 39c92a7bd3c6166368962fc464994b140bb6bcd7e096080030e55f0653134846 Nov 21 13:58:26 crc kubenswrapper[5133]: I1121 13:58:25.826129 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" event={"ID":"fb96f30b-10d1-4d5a-a909-c718939fd900","Type":"ContainerStarted","Data":"39c92a7bd3c6166368962fc464994b140bb6bcd7e096080030e55f0653134846"} Nov 21 13:58:26 crc kubenswrapper[5133]: I1121 13:58:26.313356 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg"] Nov 21 13:58:26 crc kubenswrapper[5133]: I1121 13:58:26.841232 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" event={"ID":"88d867e4-8f6e-407b-b65f-b87a47d2c578","Type":"ContainerStarted","Data":"08e8a39d14f76dc16ace3be34b9aacdc7c7f88bb27c83971ae8b4f6b5443744b"} Nov 21 13:58:28 crc kubenswrapper[5133]: I1121 13:58:28.860045 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" event={"ID":"fb96f30b-10d1-4d5a-a909-c718939fd900","Type":"ContainerStarted","Data":"eae9d80143716061ffd82b55bbc07d2f1d24725985194bb5ecaff8e0b01071b5"} Nov 21 13:58:28 crc kubenswrapper[5133]: I1121 13:58:28.860455 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:58:28 crc kubenswrapper[5133]: I1121 13:58:28.894719 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" podStartSLOduration=1.644018227 podStartE2EDuration="4.894695895s" podCreationTimestamp="2025-11-21 13:58:24 +0000 UTC" firstStartedPulling="2025-11-21 13:58:24.972387864 +0000 UTC m=+964.770220112" lastFinishedPulling="2025-11-21 13:58:28.223065532 +0000 UTC m=+968.020897780" observedRunningTime="2025-11-21 13:58:28.888587652 +0000 UTC m=+968.686419900" watchObservedRunningTime="2025-11-21 13:58:28.894695895 +0000 UTC m=+968.692528143" Nov 21 13:58:32 crc kubenswrapper[5133]: I1121 13:58:32.892711 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" event={"ID":"88d867e4-8f6e-407b-b65f-b87a47d2c578","Type":"ContainerStarted","Data":"cb285ed810019ff6882d43e8eedbeff3887c6946c24b7c13a66842e481acc982"} Nov 21 13:58:32 crc kubenswrapper[5133]: I1121 13:58:32.893170 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:32 crc kubenswrapper[5133]: I1121 13:58:32.915112 5133 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" podStartSLOduration=3.28086047 podStartE2EDuration="8.915081151s" podCreationTimestamp="2025-11-21 13:58:24 +0000 UTC" firstStartedPulling="2025-11-21 13:58:26.337355826 +0000 UTC m=+966.135188074" lastFinishedPulling="2025-11-21 13:58:31.971576497 +0000 UTC m=+971.769408755" observedRunningTime="2025-11-21 13:58:32.912536153 +0000 UTC m=+972.710368411" watchObservedRunningTime="2025-11-21 13:58:32.915081151 +0000 UTC m=+972.712913409" Nov 21 13:58:44 crc kubenswrapper[5133]: I1121 13:58:44.901783 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-5d4d9648d8-8trqg" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.638154 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xgn99"] Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.640111 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.657527 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgn99"] Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.760701 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-utilities\") pod \"redhat-marketplace-xgn99\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.760779 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qggbh\" (UniqueName: \"kubernetes.io/projected/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-kube-api-access-qggbh\") pod \"redhat-marketplace-xgn99\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.760838 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-catalog-content\") pod \"redhat-marketplace-xgn99\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.863137 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qggbh\" (UniqueName: \"kubernetes.io/projected/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-kube-api-access-qggbh\") pod \"redhat-marketplace-xgn99\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.863240 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-catalog-content\") pod \"redhat-marketplace-xgn99\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.863315 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-utilities\") pod \"redhat-marketplace-xgn99\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.863827 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-utilities\") pod \"redhat-marketplace-xgn99\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.863950 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-catalog-content\") pod \"redhat-marketplace-xgn99\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.896295 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qggbh\" (UniqueName: \"kubernetes.io/projected/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-kube-api-access-qggbh\") pod \"redhat-marketplace-xgn99\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:46 crc kubenswrapper[5133]: I1121 13:58:46.962911 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:47 crc kubenswrapper[5133]: I1121 13:58:47.476368 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgn99"] Nov 21 13:58:47 crc kubenswrapper[5133]: I1121 13:58:47.995432 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgn99" event={"ID":"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba","Type":"ContainerStarted","Data":"1e81fdf084a88e9ba79ebd448192d084bad3b25cc79fb8bef74aa02aa56be7ec"} Nov 21 13:58:50 crc kubenswrapper[5133]: I1121 13:58:50.011257 5133 generic.go:334] "Generic (PLEG): container finished" podID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerID="e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026" exitCode=0 Nov 21 13:58:50 crc kubenswrapper[5133]: I1121 13:58:50.011314 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgn99" event={"ID":"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba","Type":"ContainerDied","Data":"e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026"} Nov 21 13:58:52 crc kubenswrapper[5133]: I1121 13:58:52.030120 5133 generic.go:334] "Generic (PLEG): container finished" podID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerID="ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed" exitCode=0 Nov 21 13:58:52 crc kubenswrapper[5133]: I1121 13:58:52.030215 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgn99" event={"ID":"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba","Type":"ContainerDied","Data":"ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed"} Nov 21 13:58:54 crc kubenswrapper[5133]: I1121 13:58:54.046705 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgn99" event={"ID":"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba","Type":"ContainerStarted","Data":"d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122"} Nov 21 13:58:54 crc 
kubenswrapper[5133]: I1121 13:58:54.066457 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xgn99" podStartSLOduration=4.863591737 podStartE2EDuration="8.066431389s" podCreationTimestamp="2025-11-21 13:58:46 +0000 UTC" firstStartedPulling="2025-11-21 13:58:50.014277562 +0000 UTC m=+989.812109810" lastFinishedPulling="2025-11-21 13:58:53.217117214 +0000 UTC m=+993.014949462" observedRunningTime="2025-11-21 13:58:54.06570566 +0000 UTC m=+993.863537918" watchObservedRunningTime="2025-11-21 13:58:54.066431389 +0000 UTC m=+993.864263637" Nov 21 13:58:56 crc kubenswrapper[5133]: I1121 13:58:56.963622 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:56 crc kubenswrapper[5133]: I1121 13:58:56.964088 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:58:57 crc kubenswrapper[5133]: I1121 13:58:57.009832 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:59:04 crc kubenswrapper[5133]: I1121 13:59:04.644748 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5468dfbc5b-plbnr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.314247 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-9drp9"] Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.315797 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.318061 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-w62sk" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.326138 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.326358 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-9drp9"] Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.330244 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-9sscr"] Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.337740 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.341260 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.342066 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.346061 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-reloader\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.346119 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx66r\" (UniqueName: \"kubernetes.io/projected/67474cce-10bd-4da6-895f-a7e465d362a6-kube-api-access-lx66r\") pod \"frr-k8s-webhook-server-6998585d5-9drp9\" (UID: \"67474cce-10bd-4da6-895f-a7e465d362a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.346190 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6vkx\" (UniqueName: \"kubernetes.io/projected/48504bed-9d84-42d4-8ec2-c98bcb981b11-kube-api-access-p6vkx\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.346266 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-frr-sockets\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.346375 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-metrics\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.346412 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-frr-conf\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.346438 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/48504bed-9d84-42d4-8ec2-c98bcb981b11-frr-startup\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.346498 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/67474cce-10bd-4da6-895f-a7e465d362a6-cert\") pod \"frr-k8s-webhook-server-6998585d5-9drp9\" (UID: \"67474cce-10bd-4da6-895f-a7e465d362a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:05 crc kubenswrapper[5133]: 
I1121 13:59:05.346523 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/48504bed-9d84-42d4-8ec2-c98bcb981b11-metrics-certs\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.422344 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-nstmv"] Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.423711 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.427519 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.427872 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-jgqb8" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.428345 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.430630 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.439842 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-h9nrl"] Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.441081 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.444692 5133 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448271 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-metrics-certs\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448357 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-metrics\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448412 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lr2js\" (UniqueName: \"kubernetes.io/projected/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-kube-api-access-lr2js\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448448 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-frr-conf\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448505 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/f18f6a68-8b84-4830-a504-70170e7e0125-cert\") pod \"controller-6c7b4b5f48-h9nrl\" (UID: \"f18f6a68-8b84-4830-a504-70170e7e0125\") " pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448532 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/48504bed-9d84-42d4-8ec2-c98bcb981b11-frr-startup\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448552 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/67474cce-10bd-4da6-895f-a7e465d362a6-cert\") pod \"frr-k8s-webhook-server-6998585d5-9drp9\" (UID: \"67474cce-10bd-4da6-895f-a7e465d362a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448573 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/48504bed-9d84-42d4-8ec2-c98bcb981b11-metrics-certs\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448600 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-reloader\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448617 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx66r\" (UniqueName: \"kubernetes.io/projected/67474cce-10bd-4da6-895f-a7e465d362a6-kube-api-access-lx66r\") pod \"frr-k8s-webhook-server-6998585d5-9drp9\" (UID: \"67474cce-10bd-4da6-895f-a7e465d362a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448665 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6vkx\" (UniqueName: \"kubernetes.io/projected/48504bed-9d84-42d4-8ec2-c98bcb981b11-kube-api-access-p6vkx\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448690 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-frr-sockets\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448727 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-metallb-excludel2\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448748 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f18f6a68-8b84-4830-a504-70170e7e0125-metrics-certs\") pod \"controller-6c7b4b5f48-h9nrl\" (UID: 
\"f18f6a68-8b84-4830-a504-70170e7e0125\") " pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448767 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq24j\" (UniqueName: \"kubernetes.io/projected/f18f6a68-8b84-4830-a504-70170e7e0125-kube-api-access-fq24j\") pod \"controller-6c7b4b5f48-h9nrl\" (UID: \"f18f6a68-8b84-4830-a504-70170e7e0125\") " pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448785 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-memberlist\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.448917 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-metrics\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.449150 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-frr-conf\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.449236 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-frr-sockets\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.449549 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/48504bed-9d84-42d4-8ec2-c98bcb981b11-reloader\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.449769 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/48504bed-9d84-42d4-8ec2-c98bcb981b11-frr-startup\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.456800 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/67474cce-10bd-4da6-895f-a7e465d362a6-cert\") pod \"frr-k8s-webhook-server-6998585d5-9drp9\" (UID: \"67474cce-10bd-4da6-895f-a7e465d362a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.458827 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/48504bed-9d84-42d4-8ec2-c98bcb981b11-metrics-certs\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.474913 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-h9nrl"] Nov 21 13:59:05 crc 
kubenswrapper[5133]: I1121 13:59:05.478517 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6vkx\" (UniqueName: \"kubernetes.io/projected/48504bed-9d84-42d4-8ec2-c98bcb981b11-kube-api-access-p6vkx\") pod \"frr-k8s-9sscr\" (UID: \"48504bed-9d84-42d4-8ec2-c98bcb981b11\") " pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.478589 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx66r\" (UniqueName: \"kubernetes.io/projected/67474cce-10bd-4da6-895f-a7e465d362a6-kube-api-access-lx66r\") pod \"frr-k8s-webhook-server-6998585d5-9drp9\" (UID: \"67474cce-10bd-4da6-895f-a7e465d362a6\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.549278 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lr2js\" (UniqueName: \"kubernetes.io/projected/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-kube-api-access-lr2js\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.549343 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f18f6a68-8b84-4830-a504-70170e7e0125-cert\") pod \"controller-6c7b4b5f48-h9nrl\" (UID: \"f18f6a68-8b84-4830-a504-70170e7e0125\") " pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.549418 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-metallb-excludel2\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.549445 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f18f6a68-8b84-4830-a504-70170e7e0125-metrics-certs\") pod \"controller-6c7b4b5f48-h9nrl\" (UID: \"f18f6a68-8b84-4830-a504-70170e7e0125\") " pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.549466 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq24j\" (UniqueName: \"kubernetes.io/projected/f18f6a68-8b84-4830-a504-70170e7e0125-kube-api-access-fq24j\") pod \"controller-6c7b4b5f48-h9nrl\" (UID: \"f18f6a68-8b84-4830-a504-70170e7e0125\") " pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.549483 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-memberlist\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.549510 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-metrics-certs\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: E1121 13:59:05.549677 5133 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret 
"speaker-certs-secret" not found Nov 21 13:59:05 crc kubenswrapper[5133]: E1121 13:59:05.549753 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-metrics-certs podName:3a9b4a92-1fba-4b32-94c3-be5343dae8d2 nodeName:}" failed. No retries permitted until 2025-11-21 13:59:06.049727673 +0000 UTC m=+1005.847559921 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-metrics-certs") pod "speaker-nstmv" (UID: "3a9b4a92-1fba-4b32-94c3-be5343dae8d2") : secret "speaker-certs-secret" not found Nov 21 13:59:05 crc kubenswrapper[5133]: E1121 13:59:05.550212 5133 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 21 13:59:05 crc kubenswrapper[5133]: E1121 13:59:05.550318 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-memberlist podName:3a9b4a92-1fba-4b32-94c3-be5343dae8d2 nodeName:}" failed. No retries permitted until 2025-11-21 13:59:06.050289108 +0000 UTC m=+1005.848121546 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-memberlist") pod "speaker-nstmv" (UID: "3a9b4a92-1fba-4b32-94c3-be5343dae8d2") : secret "metallb-memberlist" not found Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.550312 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-metallb-excludel2\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.554404 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/f18f6a68-8b84-4830-a504-70170e7e0125-cert\") pod \"controller-6c7b4b5f48-h9nrl\" (UID: \"f18f6a68-8b84-4830-a504-70170e7e0125\") " pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.564789 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f18f6a68-8b84-4830-a504-70170e7e0125-metrics-certs\") pod \"controller-6c7b4b5f48-h9nrl\" (UID: \"f18f6a68-8b84-4830-a504-70170e7e0125\") " pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.570928 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq24j\" (UniqueName: \"kubernetes.io/projected/f18f6a68-8b84-4830-a504-70170e7e0125-kube-api-access-fq24j\") pod \"controller-6c7b4b5f48-h9nrl\" (UID: \"f18f6a68-8b84-4830-a504-70170e7e0125\") " pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.571943 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lr2js\" (UniqueName: \"kubernetes.io/projected/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-kube-api-access-lr2js\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.640225 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.661394 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:05 crc kubenswrapper[5133]: I1121 13:59:05.806761 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:06 crc kubenswrapper[5133]: I1121 13:59:06.058739 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-memberlist\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:06 crc kubenswrapper[5133]: I1121 13:59:06.059167 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-metrics-certs\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:06 crc kubenswrapper[5133]: E1121 13:59:06.059197 5133 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 21 13:59:06 crc kubenswrapper[5133]: E1121 13:59:06.059330 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-memberlist podName:3a9b4a92-1fba-4b32-94c3-be5343dae8d2 nodeName:}" failed. No retries permitted until 2025-11-21 13:59:07.059309802 +0000 UTC m=+1006.857142050 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-memberlist") pod "speaker-nstmv" (UID: "3a9b4a92-1fba-4b32-94c3-be5343dae8d2") : secret "metallb-memberlist" not found Nov 21 13:59:06 crc kubenswrapper[5133]: I1121 13:59:06.066393 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-metrics-certs\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:06 crc kubenswrapper[5133]: I1121 13:59:06.140076 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-h9nrl"] Nov 21 13:59:06 crc kubenswrapper[5133]: I1121 13:59:06.140405 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerStarted","Data":"2c2a7e5955d91a3b2ce4dd4f35b454ecf377f9b62b88454bde131cf5ffc7395c"} Nov 21 13:59:06 crc kubenswrapper[5133]: W1121 13:59:06.147667 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf18f6a68_8b84_4830_a504_70170e7e0125.slice/crio-633ae427c21b7dcf9e4caad72ae4ceca887bd2e7e618903dd4b636c9ea3c3926 WatchSource:0}: Error finding container 633ae427c21b7dcf9e4caad72ae4ceca887bd2e7e618903dd4b636c9ea3c3926: Status 404 returned error can't find the container with id 633ae427c21b7dcf9e4caad72ae4ceca887bd2e7e618903dd4b636c9ea3c3926 Nov 21 13:59:06 crc kubenswrapper[5133]: I1121 13:59:06.203237 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-9drp9"] Nov 21 13:59:06 crc kubenswrapper[5133]: W1121 
13:59:06.212953 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67474cce_10bd_4da6_895f_a7e465d362a6.slice/crio-f59f87040399ce2ba1598ade80552b59d24e9d94a39bc898f2e1a7c9349e9f47 WatchSource:0}: Error finding container f59f87040399ce2ba1598ade80552b59d24e9d94a39bc898f2e1a7c9349e9f47: Status 404 returned error can't find the container with id f59f87040399ce2ba1598ade80552b59d24e9d94a39bc898f2e1a7c9349e9f47 Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.025721 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.076642 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-memberlist\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.086610 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgn99"] Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.094700 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/3a9b4a92-1fba-4b32-94c3-be5343dae8d2-memberlist\") pod \"speaker-nstmv\" (UID: \"3a9b4a92-1fba-4b32-94c3-be5343dae8d2\") " pod="metallb-system/speaker-nstmv" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.148246 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" event={"ID":"67474cce-10bd-4da6-895f-a7e465d362a6","Type":"ContainerStarted","Data":"f59f87040399ce2ba1598ade80552b59d24e9d94a39bc898f2e1a7c9349e9f47"} Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.150587 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xgn99" podUID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerName="registry-server" containerID="cri-o://d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122" gracePeriod=2 Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.151289 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-h9nrl" event={"ID":"f18f6a68-8b84-4830-a504-70170e7e0125","Type":"ContainerStarted","Data":"d64b5c202fd45928217e8a4f159983a0b67c761e9e22055445a97424c5327277"} Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.151379 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-h9nrl" event={"ID":"f18f6a68-8b84-4830-a504-70170e7e0125","Type":"ContainerStarted","Data":"c17e490aca5a0813f28de5f27c1fd3459ff1801035d0de390d1c4412050c5ec3"} Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.151399 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-h9nrl" event={"ID":"f18f6a68-8b84-4830-a504-70170e7e0125","Type":"ContainerStarted","Data":"633ae427c21b7dcf9e4caad72ae4ceca887bd2e7e618903dd4b636c9ea3c3926"} Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.151540 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.190584 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="metallb-system/controller-6c7b4b5f48-h9nrl" podStartSLOduration=2.190553977 podStartE2EDuration="2.190553977s" podCreationTimestamp="2025-11-21 13:59:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:59:07.188566044 +0000 UTC m=+1006.986398292" watchObservedRunningTime="2025-11-21 13:59:07.190553977 +0000 UTC m=+1006.988386235" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.238238 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-nstmv" Nov 21 13:59:07 crc kubenswrapper[5133]: W1121 13:59:07.272534 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a9b4a92_1fba_4b32_94c3_be5343dae8d2.slice/crio-8387edf3f4f8cd75e4a8a648ecf286444fd5bafe928f7a9407197600ba678cd6 WatchSource:0}: Error finding container 8387edf3f4f8cd75e4a8a648ecf286444fd5bafe928f7a9407197600ba678cd6: Status 404 returned error can't find the container with id 8387edf3f4f8cd75e4a8a648ecf286444fd5bafe928f7a9407197600ba678cd6 Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.583698 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.696616 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qggbh\" (UniqueName: \"kubernetes.io/projected/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-kube-api-access-qggbh\") pod \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.696671 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-utilities\") pod \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.696694 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-catalog-content\") pod \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\" (UID: \"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba\") " Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.697904 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-utilities" (OuterVolumeSpecName: "utilities") pod "2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" (UID: "2ff9998c-bbe6-41a4-8f71-0bbb52c029ba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.704527 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-kube-api-access-qggbh" (OuterVolumeSpecName: "kube-api-access-qggbh") pod "2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" (UID: "2ff9998c-bbe6-41a4-8f71-0bbb52c029ba"). InnerVolumeSpecName "kube-api-access-qggbh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.715623 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" (UID: "2ff9998c-bbe6-41a4-8f71-0bbb52c029ba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.801926 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qggbh\" (UniqueName: \"kubernetes.io/projected/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-kube-api-access-qggbh\") on node \"crc\" DevicePath \"\"" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.802608 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 13:59:07 crc kubenswrapper[5133]: I1121 13:59:07.802626 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.167740 5133 generic.go:334] "Generic (PLEG): container finished" podID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerID="d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122" exitCode=0 Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.167832 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgn99" event={"ID":"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba","Type":"ContainerDied","Data":"d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122"} Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.167885 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xgn99" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.167919 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xgn99" event={"ID":"2ff9998c-bbe6-41a4-8f71-0bbb52c029ba","Type":"ContainerDied","Data":"1e81fdf084a88e9ba79ebd448192d084bad3b25cc79fb8bef74aa02aa56be7ec"} Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.167945 5133 scope.go:117] "RemoveContainer" containerID="d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.170297 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-nstmv" event={"ID":"3a9b4a92-1fba-4b32-94c3-be5343dae8d2","Type":"ContainerStarted","Data":"7dd9b392b6ad6bce368fe1c948b93cf4ec4fe69a3edebabf663dd222652313fd"} Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.170332 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-nstmv" event={"ID":"3a9b4a92-1fba-4b32-94c3-be5343dae8d2","Type":"ContainerStarted","Data":"8387edf3f4f8cd75e4a8a648ecf286444fd5bafe928f7a9407197600ba678cd6"} Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.200412 5133 scope.go:117] "RemoveContainer" containerID="ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.210578 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgn99"] Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.217340 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xgn99"] Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.239790 5133 scope.go:117] "RemoveContainer" containerID="e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.265355 5133 scope.go:117] "RemoveContainer" containerID="d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122" Nov 21 13:59:08 crc kubenswrapper[5133]: E1121 13:59:08.267657 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122\": container with ID starting with d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122 not found: ID does not exist" containerID="d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.267730 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122"} err="failed to get container status \"d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122\": rpc error: code = NotFound desc = could not find container \"d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122\": container with ID starting with d4b10e629050fcbd133fcc9f680614dc00c3a7701ba89358549687545346c122 not found: ID does not exist" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.267773 5133 scope.go:117] "RemoveContainer" containerID="ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed" Nov 21 13:59:08 crc kubenswrapper[5133]: E1121 13:59:08.271341 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed\": container with ID starting with ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed not found: ID does not exist" containerID="ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.271411 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed"} err="failed to get container status \"ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed\": rpc error: code = NotFound desc = could not find container \"ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed\": container with ID starting with ad5f405088e63b9ccc2a3bb42484e0006379a5c972ed93a76a572060e2a54eed not found: ID does not exist" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.271445 5133 scope.go:117] "RemoveContainer" containerID="e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026" Nov 21 13:59:08 crc kubenswrapper[5133]: E1121 13:59:08.276822 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026\": container with ID starting with e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026 not found: ID does not exist" containerID="e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.276855 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026"} err="failed to get container status \"e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026\": rpc error: code = NotFound desc = could not find container \"e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026\": container with ID starting with e1c058468f2f2d9590f5172ee42b346738582b777a4d76acef115ea392f87026 not found: ID does not exist" Nov 21 13:59:08 crc kubenswrapper[5133]: I1121 13:59:08.478669 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" path="/var/lib/kubelet/pods/2ff9998c-bbe6-41a4-8f71-0bbb52c029ba/volumes" Nov 21 13:59:09 crc kubenswrapper[5133]: I1121 13:59:09.182680 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-nstmv" event={"ID":"3a9b4a92-1fba-4b32-94c3-be5343dae8d2","Type":"ContainerStarted","Data":"33233412125f846b796e80bbb3a275c6ac2cf84f944f9483f105b70b5b864cba"} Nov 21 13:59:09 crc kubenswrapper[5133]: I1121 13:59:09.183216 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-nstmv" Nov 21 13:59:09 crc kubenswrapper[5133]: I1121 13:59:09.205105 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-nstmv" podStartSLOduration=4.205079446 podStartE2EDuration="4.205079446s" podCreationTimestamp="2025-11-21 13:59:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 13:59:09.201823019 +0000 UTC m=+1008.999655277" watchObservedRunningTime="2025-11-21 13:59:09.205079446 +0000 UTC m=+1009.002911694" Nov 21 13:59:15 crc kubenswrapper[5133]: I1121 13:59:15.246909 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" 
event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerStarted","Data":"3215968cb754b8830cc5179f52274d867c76a4d6930c6cff9386a141faf5625d"} Nov 21 13:59:15 crc kubenswrapper[5133]: I1121 13:59:15.249131 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" event={"ID":"67474cce-10bd-4da6-895f-a7e465d362a6","Type":"ContainerStarted","Data":"a20307b36f0934ae0973b76f85973d90c7900146e7cb623faa60c319c12cf512"} Nov 21 13:59:15 crc kubenswrapper[5133]: I1121 13:59:15.249379 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:15 crc kubenswrapper[5133]: I1121 13:59:15.267754 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" podStartSLOduration=1.419065713 podStartE2EDuration="10.267732743s" podCreationTimestamp="2025-11-21 13:59:05 +0000 UTC" firstStartedPulling="2025-11-21 13:59:06.217224025 +0000 UTC m=+1006.015056273" lastFinishedPulling="2025-11-21 13:59:15.065891055 +0000 UTC m=+1014.863723303" observedRunningTime="2025-11-21 13:59:15.264954409 +0000 UTC m=+1015.062786707" watchObservedRunningTime="2025-11-21 13:59:15.267732743 +0000 UTC m=+1015.065565001" Nov 21 13:59:16 crc kubenswrapper[5133]: I1121 13:59:16.258754 5133 generic.go:334] "Generic (PLEG): container finished" podID="48504bed-9d84-42d4-8ec2-c98bcb981b11" containerID="3215968cb754b8830cc5179f52274d867c76a4d6930c6cff9386a141faf5625d" exitCode=0 Nov 21 13:59:16 crc kubenswrapper[5133]: I1121 13:59:16.258872 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerDied","Data":"3215968cb754b8830cc5179f52274d867c76a4d6930c6cff9386a141faf5625d"} Nov 21 13:59:17 crc kubenswrapper[5133]: I1121 13:59:17.244275 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-nstmv" Nov 21 13:59:17 crc kubenswrapper[5133]: I1121 13:59:17.269743 5133 generic.go:334] "Generic (PLEG): container finished" podID="48504bed-9d84-42d4-8ec2-c98bcb981b11" containerID="2f173ecd65b8cc818eac9e194392f0457127ec271b126cacbde47062c78552a1" exitCode=0 Nov 21 13:59:17 crc kubenswrapper[5133]: I1121 13:59:17.269814 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerDied","Data":"2f173ecd65b8cc818eac9e194392f0457127ec271b126cacbde47062c78552a1"} Nov 21 13:59:18 crc kubenswrapper[5133]: I1121 13:59:18.279556 5133 generic.go:334] "Generic (PLEG): container finished" podID="48504bed-9d84-42d4-8ec2-c98bcb981b11" containerID="af4f899bcfd56156405892ad9a74105f1411f761b3041fb0d4f8f620d0b5f0d3" exitCode=0 Nov 21 13:59:18 crc kubenswrapper[5133]: I1121 13:59:18.279691 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerDied","Data":"af4f899bcfd56156405892ad9a74105f1411f761b3041fb0d4f8f620d0b5f0d3"} Nov 21 13:59:19 crc kubenswrapper[5133]: I1121 13:59:19.298286 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerStarted","Data":"8ac9c4937f6fc960aa8ec6aeb6a762e78cb5c87c7d1b649a718fc2bf2030ae39"} Nov 21 13:59:19 crc kubenswrapper[5133]: I1121 13:59:19.298881 5133 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerStarted","Data":"eef057d6fcde15ddb86e218ebc57d681c1650ff8566d8d82150c27c1a6d2a9d5"} Nov 21 13:59:19 crc kubenswrapper[5133]: I1121 13:59:19.298904 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerStarted","Data":"8d1b034e9aa345d0a4e92c76204a717ab37913d298b2f5d4b8db19ff52c54962"} Nov 21 13:59:19 crc kubenswrapper[5133]: I1121 13:59:19.298917 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerStarted","Data":"12f0402af5c79a374b8ae771077aa3900334fe47d47793f84490a92daf32f20d"} Nov 21 13:59:19 crc kubenswrapper[5133]: I1121 13:59:19.298928 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerStarted","Data":"a3c26e9b1b250168d3351c20e6461dd07445afb7abcef9dda0ede4b7b245a225"} Nov 21 13:59:20 crc kubenswrapper[5133]: I1121 13:59:20.310305 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-9sscr" event={"ID":"48504bed-9d84-42d4-8ec2-c98bcb981b11","Type":"ContainerStarted","Data":"28ab7c0da3918fd43206ffca92411182294a5cb4939a02a3ba4c4846f68bd739"} Nov 21 13:59:20 crc kubenswrapper[5133]: I1121 13:59:20.310742 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:20 crc kubenswrapper[5133]: I1121 13:59:20.339344 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-9sscr" podStartSLOduration=6.159860858 podStartE2EDuration="15.339322544s" podCreationTimestamp="2025-11-21 13:59:05 +0000 UTC" firstStartedPulling="2025-11-21 13:59:05.849180792 +0000 UTC m=+1005.647013040" lastFinishedPulling="2025-11-21 13:59:15.028642478 +0000 UTC m=+1014.826474726" observedRunningTime="2025-11-21 13:59:20.336800567 +0000 UTC m=+1020.134632825" watchObservedRunningTime="2025-11-21 13:59:20.339322544 +0000 UTC m=+1020.137154802" Nov 21 13:59:20 crc kubenswrapper[5133]: I1121 13:59:20.662217 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:20 crc kubenswrapper[5133]: I1121 13:59:20.701741 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.311105 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.311989 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.710433 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-r5rkm"] Nov 21 13:59:23 crc kubenswrapper[5133]: E1121 13:59:23.710751 
5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerName="extract-content" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.710764 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerName="extract-content" Nov 21 13:59:23 crc kubenswrapper[5133]: E1121 13:59:23.710773 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerName="extract-utilities" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.710780 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerName="extract-utilities" Nov 21 13:59:23 crc kubenswrapper[5133]: E1121 13:59:23.710790 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerName="registry-server" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.710797 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerName="registry-server" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.710899 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ff9998c-bbe6-41a4-8f71-0bbb52c029ba" containerName="registry-server" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.711411 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-r5rkm" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.717472 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.717897 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-4q8k2" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.725473 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-r5rkm"] Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.725972 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.856829 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7f6c\" (UniqueName: \"kubernetes.io/projected/a92d6765-b45f-43f2-a1ae-c55689f599cb-kube-api-access-q7f6c\") pod \"openstack-operator-index-r5rkm\" (UID: \"a92d6765-b45f-43f2-a1ae-c55689f599cb\") " pod="openstack-operators/openstack-operator-index-r5rkm" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.958504 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7f6c\" (UniqueName: \"kubernetes.io/projected/a92d6765-b45f-43f2-a1ae-c55689f599cb-kube-api-access-q7f6c\") pod \"openstack-operator-index-r5rkm\" (UID: \"a92d6765-b45f-43f2-a1ae-c55689f599cb\") " pod="openstack-operators/openstack-operator-index-r5rkm" Nov 21 13:59:23 crc kubenswrapper[5133]: I1121 13:59:23.985107 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7f6c\" (UniqueName: \"kubernetes.io/projected/a92d6765-b45f-43f2-a1ae-c55689f599cb-kube-api-access-q7f6c\") pod \"openstack-operator-index-r5rkm\" (UID: \"a92d6765-b45f-43f2-a1ae-c55689f599cb\") " pod="openstack-operators/openstack-operator-index-r5rkm" Nov 21 13:59:24 crc 
kubenswrapper[5133]: I1121 13:59:24.030873 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-r5rkm" Nov 21 13:59:24 crc kubenswrapper[5133]: I1121 13:59:24.479041 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-r5rkm"] Nov 21 13:59:25 crc kubenswrapper[5133]: I1121 13:59:25.344754 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r5rkm" event={"ID":"a92d6765-b45f-43f2-a1ae-c55689f599cb","Type":"ContainerStarted","Data":"19fb03cb7cdd8e83daf7b63980ac3e87dd44148bc8a9ab0c2622aab3d7197ca1"} Nov 21 13:59:25 crc kubenswrapper[5133]: I1121 13:59:25.644627 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" Nov 21 13:59:25 crc kubenswrapper[5133]: I1121 13:59:25.811985 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-h9nrl" Nov 21 13:59:28 crc kubenswrapper[5133]: I1121 13:59:28.369410 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-r5rkm" event={"ID":"a92d6765-b45f-43f2-a1ae-c55689f599cb","Type":"ContainerStarted","Data":"ca6aaed1b4344b52647d8d61c66c7915aa687525d0c5f257c8373e0a1d291019"} Nov 21 13:59:28 crc kubenswrapper[5133]: I1121 13:59:28.387216 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-r5rkm" podStartSLOduration=2.303319767 podStartE2EDuration="5.387194266s" podCreationTimestamp="2025-11-21 13:59:23 +0000 UTC" firstStartedPulling="2025-11-21 13:59:24.48458299 +0000 UTC m=+1024.282415238" lastFinishedPulling="2025-11-21 13:59:27.568457499 +0000 UTC m=+1027.366289737" observedRunningTime="2025-11-21 13:59:28.385162542 +0000 UTC m=+1028.182994790" watchObservedRunningTime="2025-11-21 13:59:28.387194266 +0000 UTC m=+1028.185026514" Nov 21 13:59:34 crc kubenswrapper[5133]: I1121 13:59:34.031775 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-r5rkm" Nov 21 13:59:34 crc kubenswrapper[5133]: I1121 13:59:34.033305 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-r5rkm" Nov 21 13:59:34 crc kubenswrapper[5133]: I1121 13:59:34.065821 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-r5rkm" Nov 21 13:59:34 crc kubenswrapper[5133]: I1121 13:59:34.447550 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-r5rkm" Nov 21 13:59:35 crc kubenswrapper[5133]: I1121 13:59:35.667434 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-9sscr" Nov 21 13:59:51 crc kubenswrapper[5133]: I1121 13:59:51.945168 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t"] Nov 21 13:59:51 crc kubenswrapper[5133]: I1121 13:59:51.948155 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:51 crc kubenswrapper[5133]: I1121 13:59:51.951335 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-7cs4t" Nov 21 13:59:51 crc kubenswrapper[5133]: I1121 13:59:51.964523 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t"] Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.114433 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-bundle\") pod \"8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.114786 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxqsf\" (UniqueName: \"kubernetes.io/projected/80d4af44-47ac-4fcf-a581-b22abd5ca264-kube-api-access-qxqsf\") pod \"8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.114891 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-util\") pod \"8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.216524 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxqsf\" (UniqueName: \"kubernetes.io/projected/80d4af44-47ac-4fcf-a581-b22abd5ca264-kube-api-access-qxqsf\") pod \"8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.216936 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-util\") pod \"8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.217140 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-bundle\") pod \"8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.218258 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-bundle\") pod \"8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.218386 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-util\") pod \"8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.241375 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxqsf\" (UniqueName: \"kubernetes.io/projected/80d4af44-47ac-4fcf-a581-b22abd5ca264-kube-api-access-qxqsf\") pod \"8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.267833 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:52 crc kubenswrapper[5133]: I1121 13:59:52.687989 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t"] Nov 21 13:59:53 crc kubenswrapper[5133]: I1121 13:59:53.311498 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 13:59:53 crc kubenswrapper[5133]: I1121 13:59:53.312221 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 13:59:53 crc kubenswrapper[5133]: I1121 13:59:53.551119 5133 generic.go:334] "Generic (PLEG): container finished" podID="80d4af44-47ac-4fcf-a581-b22abd5ca264" containerID="f8abedff9aebdb952704f519fc81e34ff2b5fe0e055f9197b758189e40199ac8" exitCode=0 Nov 21 13:59:53 crc kubenswrapper[5133]: I1121 13:59:53.551170 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" event={"ID":"80d4af44-47ac-4fcf-a581-b22abd5ca264","Type":"ContainerDied","Data":"f8abedff9aebdb952704f519fc81e34ff2b5fe0e055f9197b758189e40199ac8"} Nov 21 13:59:53 crc kubenswrapper[5133]: I1121 13:59:53.551206 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" event={"ID":"80d4af44-47ac-4fcf-a581-b22abd5ca264","Type":"ContainerStarted","Data":"2ff7ec3de838b2769ce5b618facc486d5a5c49e707b0a78546b755ddd3b5a678"} Nov 21 13:59:54 crc kubenswrapper[5133]: I1121 13:59:54.567758 5133 generic.go:334] "Generic (PLEG): container finished" podID="80d4af44-47ac-4fcf-a581-b22abd5ca264" 
containerID="fcecb3099490880e8c224e772461f6e151769281bf7d6333cb5b9c3ba409bd92" exitCode=0 Nov 21 13:59:54 crc kubenswrapper[5133]: I1121 13:59:54.567884 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" event={"ID":"80d4af44-47ac-4fcf-a581-b22abd5ca264","Type":"ContainerDied","Data":"fcecb3099490880e8c224e772461f6e151769281bf7d6333cb5b9c3ba409bd92"} Nov 21 13:59:55 crc kubenswrapper[5133]: I1121 13:59:55.577809 5133 generic.go:334] "Generic (PLEG): container finished" podID="80d4af44-47ac-4fcf-a581-b22abd5ca264" containerID="c6e7892a4e18a9a4ff79770e96aca0e491570896dc42b3f50735e611e526b2a8" exitCode=0 Nov 21 13:59:55 crc kubenswrapper[5133]: I1121 13:59:55.577870 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" event={"ID":"80d4af44-47ac-4fcf-a581-b22abd5ca264","Type":"ContainerDied","Data":"c6e7892a4e18a9a4ff79770e96aca0e491570896dc42b3f50735e611e526b2a8"} Nov 21 13:59:56 crc kubenswrapper[5133]: I1121 13:59:56.837227 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 13:59:56 crc kubenswrapper[5133]: I1121 13:59:56.973651 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-bundle\") pod \"80d4af44-47ac-4fcf-a581-b22abd5ca264\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " Nov 21 13:59:56 crc kubenswrapper[5133]: I1121 13:59:56.973780 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-util\") pod \"80d4af44-47ac-4fcf-a581-b22abd5ca264\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " Nov 21 13:59:56 crc kubenswrapper[5133]: I1121 13:59:56.974183 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxqsf\" (UniqueName: \"kubernetes.io/projected/80d4af44-47ac-4fcf-a581-b22abd5ca264-kube-api-access-qxqsf\") pod \"80d4af44-47ac-4fcf-a581-b22abd5ca264\" (UID: \"80d4af44-47ac-4fcf-a581-b22abd5ca264\") " Nov 21 13:59:56 crc kubenswrapper[5133]: I1121 13:59:56.975172 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-bundle" (OuterVolumeSpecName: "bundle") pod "80d4af44-47ac-4fcf-a581-b22abd5ca264" (UID: "80d4af44-47ac-4fcf-a581-b22abd5ca264"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:59:56 crc kubenswrapper[5133]: I1121 13:59:56.982941 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80d4af44-47ac-4fcf-a581-b22abd5ca264-kube-api-access-qxqsf" (OuterVolumeSpecName: "kube-api-access-qxqsf") pod "80d4af44-47ac-4fcf-a581-b22abd5ca264" (UID: "80d4af44-47ac-4fcf-a581-b22abd5ca264"). InnerVolumeSpecName "kube-api-access-qxqsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 13:59:56 crc kubenswrapper[5133]: I1121 13:59:56.997561 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-util" (OuterVolumeSpecName: "util") pod "80d4af44-47ac-4fcf-a581-b22abd5ca264" (UID: "80d4af44-47ac-4fcf-a581-b22abd5ca264"). 
InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 13:59:57 crc kubenswrapper[5133]: I1121 13:59:57.076177 5133 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 13:59:57 crc kubenswrapper[5133]: I1121 13:59:57.076227 5133 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/80d4af44-47ac-4fcf-a581-b22abd5ca264-util\") on node \"crc\" DevicePath \"\"" Nov 21 13:59:57 crc kubenswrapper[5133]: I1121 13:59:57.076240 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxqsf\" (UniqueName: \"kubernetes.io/projected/80d4af44-47ac-4fcf-a581-b22abd5ca264-kube-api-access-qxqsf\") on node \"crc\" DevicePath \"\"" Nov 21 13:59:57 crc kubenswrapper[5133]: I1121 13:59:57.592509 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" event={"ID":"80d4af44-47ac-4fcf-a581-b22abd5ca264","Type":"ContainerDied","Data":"2ff7ec3de838b2769ce5b618facc486d5a5c49e707b0a78546b755ddd3b5a678"} Nov 21 13:59:57 crc kubenswrapper[5133]: I1121 13:59:57.592564 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ff7ec3de838b2769ce5b618facc486d5a5c49e707b0a78546b755ddd3b5a678" Nov 21 13:59:57 crc kubenswrapper[5133]: I1121 13:59:57.592678 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.145766 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p"] Nov 21 14:00:00 crc kubenswrapper[5133]: E1121 14:00:00.146642 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80d4af44-47ac-4fcf-a581-b22abd5ca264" containerName="pull" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.146663 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="80d4af44-47ac-4fcf-a581-b22abd5ca264" containerName="pull" Nov 21 14:00:00 crc kubenswrapper[5133]: E1121 14:00:00.146683 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80d4af44-47ac-4fcf-a581-b22abd5ca264" containerName="util" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.146694 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="80d4af44-47ac-4fcf-a581-b22abd5ca264" containerName="util" Nov 21 14:00:00 crc kubenswrapper[5133]: E1121 14:00:00.146723 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80d4af44-47ac-4fcf-a581-b22abd5ca264" containerName="extract" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.146733 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="80d4af44-47ac-4fcf-a581-b22abd5ca264" containerName="extract" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.146932 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="80d4af44-47ac-4fcf-a581-b22abd5ca264" containerName="extract" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.147625 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.154577 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.155878 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p"] Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.163280 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.228518 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptnts\" (UniqueName: \"kubernetes.io/projected/ac012817-c59e-4d3b-8d42-4cb7795b1034-kube-api-access-ptnts\") pod \"collect-profiles-29395560-hlz6p\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.228598 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac012817-c59e-4d3b-8d42-4cb7795b1034-secret-volume\") pod \"collect-profiles-29395560-hlz6p\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.228726 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac012817-c59e-4d3b-8d42-4cb7795b1034-config-volume\") pod \"collect-profiles-29395560-hlz6p\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.330169 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptnts\" (UniqueName: \"kubernetes.io/projected/ac012817-c59e-4d3b-8d42-4cb7795b1034-kube-api-access-ptnts\") pod \"collect-profiles-29395560-hlz6p\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.330223 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac012817-c59e-4d3b-8d42-4cb7795b1034-secret-volume\") pod \"collect-profiles-29395560-hlz6p\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.330291 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac012817-c59e-4d3b-8d42-4cb7795b1034-config-volume\") pod \"collect-profiles-29395560-hlz6p\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.331200 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac012817-c59e-4d3b-8d42-4cb7795b1034-config-volume\") pod 
\"collect-profiles-29395560-hlz6p\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.336192 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac012817-c59e-4d3b-8d42-4cb7795b1034-secret-volume\") pod \"collect-profiles-29395560-hlz6p\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.352196 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptnts\" (UniqueName: \"kubernetes.io/projected/ac012817-c59e-4d3b-8d42-4cb7795b1034-kube-api-access-ptnts\") pod \"collect-profiles-29395560-hlz6p\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.479632 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:00 crc kubenswrapper[5133]: I1121 14:00:00.698647 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p"] Nov 21 14:00:01 crc kubenswrapper[5133]: I1121 14:00:01.626641 5133 generic.go:334] "Generic (PLEG): container finished" podID="ac012817-c59e-4d3b-8d42-4cb7795b1034" containerID="6f2fb749f68cbec474032f1b6521017cd777b9c458ecab821bce31f47ea71192" exitCode=0 Nov 21 14:00:01 crc kubenswrapper[5133]: I1121 14:00:01.626709 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" event={"ID":"ac012817-c59e-4d3b-8d42-4cb7795b1034","Type":"ContainerDied","Data":"6f2fb749f68cbec474032f1b6521017cd777b9c458ecab821bce31f47ea71192"} Nov 21 14:00:01 crc kubenswrapper[5133]: I1121 14:00:01.627109 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" event={"ID":"ac012817-c59e-4d3b-8d42-4cb7795b1034","Type":"ContainerStarted","Data":"c473dd9ad7323d7e3127af5b4064ee6152a946634214ce6e4536df4fa9a51949"} Nov 21 14:00:02 crc kubenswrapper[5133]: I1121 14:00:02.876054 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:02 crc kubenswrapper[5133]: I1121 14:00:02.971789 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptnts\" (UniqueName: \"kubernetes.io/projected/ac012817-c59e-4d3b-8d42-4cb7795b1034-kube-api-access-ptnts\") pod \"ac012817-c59e-4d3b-8d42-4cb7795b1034\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " Nov 21 14:00:02 crc kubenswrapper[5133]: I1121 14:00:02.971859 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac012817-c59e-4d3b-8d42-4cb7795b1034-secret-volume\") pod \"ac012817-c59e-4d3b-8d42-4cb7795b1034\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " Nov 21 14:00:02 crc kubenswrapper[5133]: I1121 14:00:02.971954 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac012817-c59e-4d3b-8d42-4cb7795b1034-config-volume\") pod \"ac012817-c59e-4d3b-8d42-4cb7795b1034\" (UID: \"ac012817-c59e-4d3b-8d42-4cb7795b1034\") " Nov 21 14:00:02 crc kubenswrapper[5133]: I1121 14:00:02.972810 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac012817-c59e-4d3b-8d42-4cb7795b1034-config-volume" (OuterVolumeSpecName: "config-volume") pod "ac012817-c59e-4d3b-8d42-4cb7795b1034" (UID: "ac012817-c59e-4d3b-8d42-4cb7795b1034"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:00:02 crc kubenswrapper[5133]: I1121 14:00:02.990270 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac012817-c59e-4d3b-8d42-4cb7795b1034-kube-api-access-ptnts" (OuterVolumeSpecName: "kube-api-access-ptnts") pod "ac012817-c59e-4d3b-8d42-4cb7795b1034" (UID: "ac012817-c59e-4d3b-8d42-4cb7795b1034"). InnerVolumeSpecName "kube-api-access-ptnts". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:00:02 crc kubenswrapper[5133]: I1121 14:00:02.992040 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac012817-c59e-4d3b-8d42-4cb7795b1034-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ac012817-c59e-4d3b-8d42-4cb7795b1034" (UID: "ac012817-c59e-4d3b-8d42-4cb7795b1034"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:00:03 crc kubenswrapper[5133]: I1121 14:00:03.073961 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac012817-c59e-4d3b-8d42-4cb7795b1034-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 14:00:03 crc kubenswrapper[5133]: I1121 14:00:03.074019 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac012817-c59e-4d3b-8d42-4cb7795b1034-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 14:00:03 crc kubenswrapper[5133]: I1121 14:00:03.074033 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptnts\" (UniqueName: \"kubernetes.io/projected/ac012817-c59e-4d3b-8d42-4cb7795b1034-kube-api-access-ptnts\") on node \"crc\" DevicePath \"\"" Nov 21 14:00:03 crc kubenswrapper[5133]: I1121 14:00:03.641215 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" event={"ID":"ac012817-c59e-4d3b-8d42-4cb7795b1034","Type":"ContainerDied","Data":"c473dd9ad7323d7e3127af5b4064ee6152a946634214ce6e4536df4fa9a51949"} Nov 21 14:00:03 crc kubenswrapper[5133]: I1121 14:00:03.641272 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c473dd9ad7323d7e3127af5b4064ee6152a946634214ce6e4536df4fa9a51949" Nov 21 14:00:03 crc kubenswrapper[5133]: I1121 14:00:03.641267 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p" Nov 21 14:00:04 crc kubenswrapper[5133]: I1121 14:00:04.874229 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk"] Nov 21 14:00:04 crc kubenswrapper[5133]: E1121 14:00:04.876645 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac012817-c59e-4d3b-8d42-4cb7795b1034" containerName="collect-profiles" Nov 21 14:00:04 crc kubenswrapper[5133]: I1121 14:00:04.876742 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac012817-c59e-4d3b-8d42-4cb7795b1034" containerName="collect-profiles" Nov 21 14:00:04 crc kubenswrapper[5133]: I1121 14:00:04.876989 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac012817-c59e-4d3b-8d42-4cb7795b1034" containerName="collect-profiles" Nov 21 14:00:04 crc kubenswrapper[5133]: I1121 14:00:04.877716 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" Nov 21 14:00:04 crc kubenswrapper[5133]: I1121 14:00:04.880601 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-r8plq" Nov 21 14:00:04 crc kubenswrapper[5133]: I1121 14:00:04.899552 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk"] Nov 21 14:00:05 crc kubenswrapper[5133]: I1121 14:00:05.004492 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2x5bz\" (UniqueName: \"kubernetes.io/projected/93d86db4-2649-43aa-b252-6a288a505bec-kube-api-access-2x5bz\") pod \"openstack-operator-controller-operator-6d9c469469-jzjfk\" (UID: \"93d86db4-2649-43aa-b252-6a288a505bec\") " pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" Nov 21 14:00:05 crc kubenswrapper[5133]: I1121 14:00:05.106145 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2x5bz\" (UniqueName: \"kubernetes.io/projected/93d86db4-2649-43aa-b252-6a288a505bec-kube-api-access-2x5bz\") pod \"openstack-operator-controller-operator-6d9c469469-jzjfk\" (UID: \"93d86db4-2649-43aa-b252-6a288a505bec\") " pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" Nov 21 14:00:05 crc kubenswrapper[5133]: I1121 14:00:05.125408 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2x5bz\" (UniqueName: \"kubernetes.io/projected/93d86db4-2649-43aa-b252-6a288a505bec-kube-api-access-2x5bz\") pod \"openstack-operator-controller-operator-6d9c469469-jzjfk\" (UID: \"93d86db4-2649-43aa-b252-6a288a505bec\") " pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" Nov 21 14:00:05 crc kubenswrapper[5133]: I1121 14:00:05.213070 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" Nov 21 14:00:05 crc kubenswrapper[5133]: I1121 14:00:05.498394 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk"] Nov 21 14:00:05 crc kubenswrapper[5133]: I1121 14:00:05.655875 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" event={"ID":"93d86db4-2649-43aa-b252-6a288a505bec","Type":"ContainerStarted","Data":"d00d88358e17b2485919299e9bc75b665ffe3abe48598508e2f82b02d49e84e1"} Nov 21 14:00:10 crc kubenswrapper[5133]: I1121 14:00:10.689214 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" event={"ID":"93d86db4-2649-43aa-b252-6a288a505bec","Type":"ContainerStarted","Data":"5291a9a5f82513cff68947bce7325346e9abda95ac2e9f3decdae5df17df3a7a"} Nov 21 14:00:10 crc kubenswrapper[5133]: I1121 14:00:10.689716 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" Nov 21 14:00:10 crc kubenswrapper[5133]: I1121 14:00:10.727417 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" podStartSLOduration=2.261476418 podStartE2EDuration="6.7273911s" podCreationTimestamp="2025-11-21 14:00:04 +0000 UTC" firstStartedPulling="2025-11-21 14:00:05.517204493 +0000 UTC m=+1065.315036741" lastFinishedPulling="2025-11-21 14:00:09.983119165 +0000 UTC m=+1069.780951423" observedRunningTime="2025-11-21 14:00:10.721965865 +0000 UTC m=+1070.519798113" watchObservedRunningTime="2025-11-21 14:00:10.7273911 +0000 UTC m=+1070.525223348" Nov 21 14:00:15 crc kubenswrapper[5133]: I1121 14:00:15.216567 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-6d9c469469-jzjfk" Nov 21 14:00:23 crc kubenswrapper[5133]: I1121 14:00:23.310917 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:00:23 crc kubenswrapper[5133]: I1121 14:00:23.311799 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:00:23 crc kubenswrapper[5133]: I1121 14:00:23.311861 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:00:23 crc kubenswrapper[5133]: I1121 14:00:23.312426 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"aeb58eef34dec1c617396ba99663df9426cb6865e9e12764d8c5ffc72116a175"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:00:23 crc kubenswrapper[5133]: I1121 14:00:23.312487 
5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://aeb58eef34dec1c617396ba99663df9426cb6865e9e12764d8c5ffc72116a175" gracePeriod=600 Nov 21 14:00:23 crc kubenswrapper[5133]: I1121 14:00:23.782963 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="aeb58eef34dec1c617396ba99663df9426cb6865e9e12764d8c5ffc72116a175" exitCode=0 Nov 21 14:00:23 crc kubenswrapper[5133]: I1121 14:00:23.783047 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"aeb58eef34dec1c617396ba99663df9426cb6865e9e12764d8c5ffc72116a175"} Nov 21 14:00:23 crc kubenswrapper[5133]: I1121 14:00:23.783360 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"5883b2ffbdc225f8fc9c34f308aadba5798cb012e313b8c25cad57a148a69dbc"} Nov 21 14:00:23 crc kubenswrapper[5133]: I1121 14:00:23.783389 5133 scope.go:117] "RemoveContainer" containerID="8270d91a0dc8e867dadddbaf1606a312bc821670bb959c0005ca6906c1d15307" Nov 21 14:00:37 crc kubenswrapper[5133]: I1121 14:00:37.926444 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd"] Nov 21 14:00:37 crc kubenswrapper[5133]: I1121 14:00:37.938130 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" Nov 21 14:00:37 crc kubenswrapper[5133]: I1121 14:00:37.943485 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd"] Nov 21 14:00:37 crc kubenswrapper[5133]: I1121 14:00:37.949839 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-jfv2l" Nov 21 14:00:37 crc kubenswrapper[5133]: I1121 14:00:37.953575 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p"] Nov 21 14:00:37 crc kubenswrapper[5133]: I1121 14:00:37.955131 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" Nov 21 14:00:37 crc kubenswrapper[5133]: I1121 14:00:37.964461 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-prdkw" Nov 21 14:00:37 crc kubenswrapper[5133]: I1121 14:00:37.973035 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft"] Nov 21 14:00:37 crc kubenswrapper[5133]: I1121 14:00:37.982243 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.003048 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-zgssf" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.007159 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.019317 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.025172 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-m4ccn" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.046169 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.066677 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm5x6\" (UniqueName: \"kubernetes.io/projected/99e612e9-ba8d-41cd-9654-4332a4132c4f-kube-api-access-hm5x6\") pod \"barbican-operator-controller-manager-86dc4d89c8-znxpd\" (UID: \"99e612e9-ba8d-41cd-9654-4332a4132c4f\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.067100 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wh692\" (UniqueName: \"kubernetes.io/projected/50fccca4-2734-4839-83b0-d220b0dfa1d6-kube-api-access-wh692\") pod \"cinder-operator-controller-manager-5c575964b7-fcp4p\" (UID: \"50fccca4-2734-4839-83b0-d220b0dfa1d6\") " pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.069324 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.087297 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.092381 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.093771 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.113226 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-nfm9m" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.116084 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.119317 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.133756 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-wznpr" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.144424 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.169170 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2v6f\" (UniqueName: \"kubernetes.io/projected/abdbcac3-a654-4558-b71c-6b0a4d6c9c19-kube-api-access-g2v6f\") pod \"glance-operator-controller-manager-68b95954c9-g6kng\" (UID: \"abdbcac3-a654-4558-b71c-6b0a4d6c9c19\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.169242 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wh692\" (UniqueName: \"kubernetes.io/projected/50fccca4-2734-4839-83b0-d220b0dfa1d6-kube-api-access-wh692\") pod \"cinder-operator-controller-manager-5c575964b7-fcp4p\" (UID: \"50fccca4-2734-4839-83b0-d220b0dfa1d6\") " pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.169333 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm5x6\" (UniqueName: \"kubernetes.io/projected/99e612e9-ba8d-41cd-9654-4332a4132c4f-kube-api-access-hm5x6\") pod \"barbican-operator-controller-manager-86dc4d89c8-znxpd\" (UID: \"99e612e9-ba8d-41cd-9654-4332a4132c4f\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.169359 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br6wb\" (UniqueName: \"kubernetes.io/projected/fbcf38a1-6730-49b5-bf19-9966465f2d1b-kube-api-access-br6wb\") pod \"designate-operator-controller-manager-7d695c9b56-l7bft\" (UID: \"fbcf38a1-6730-49b5-bf19-9966465f2d1b\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.190196 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.224141 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm5x6\" (UniqueName: \"kubernetes.io/projected/99e612e9-ba8d-41cd-9654-4332a4132c4f-kube-api-access-hm5x6\") pod \"barbican-operator-controller-manager-86dc4d89c8-znxpd\" (UID: \"99e612e9-ba8d-41cd-9654-4332a4132c4f\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.227332 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.229307 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.230892 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wh692\" (UniqueName: \"kubernetes.io/projected/50fccca4-2734-4839-83b0-d220b0dfa1d6-kube-api-access-wh692\") pod \"cinder-operator-controller-manager-5c575964b7-fcp4p\" (UID: \"50fccca4-2734-4839-83b0-d220b0dfa1d6\") " pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.233972 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-5qj7b" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.234878 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.259843 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.266813 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.270231 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvhzz\" (UniqueName: \"kubernetes.io/projected/701735b8-41e5-47f9-9ac7-b8cfc0357597-kube-api-access-tvhzz\") pod \"horizon-operator-controller-manager-68c9694994-4h6lz\" (UID: \"701735b8-41e5-47f9-9ac7-b8cfc0357597\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.270335 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjwhp\" (UniqueName: \"kubernetes.io/projected/156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8-kube-api-access-rjwhp\") pod \"heat-operator-controller-manager-774b86978c-q9q7l\" (UID: \"156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.270392 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br6wb\" (UniqueName: \"kubernetes.io/projected/fbcf38a1-6730-49b5-bf19-9966465f2d1b-kube-api-access-br6wb\") pod \"designate-operator-controller-manager-7d695c9b56-l7bft\" (UID: \"fbcf38a1-6730-49b5-bf19-9966465f2d1b\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.270461 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2v6f\" (UniqueName: \"kubernetes.io/projected/abdbcac3-a654-4558-b71c-6b0a4d6c9c19-kube-api-access-g2v6f\") pod \"glance-operator-controller-manager-68b95954c9-g6kng\" (UID: \"abdbcac3-a654-4558-b71c-6b0a4d6c9c19\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.293796 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br6wb\" (UniqueName: \"kubernetes.io/projected/fbcf38a1-6730-49b5-bf19-9966465f2d1b-kube-api-access-br6wb\") pod 
\"designate-operator-controller-manager-7d695c9b56-l7bft\" (UID: \"fbcf38a1-6730-49b5-bf19-9966465f2d1b\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.297457 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.298194 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.300058 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.300160 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2v6f\" (UniqueName: \"kubernetes.io/projected/abdbcac3-a654-4558-b71c-6b0a4d6c9c19-kube-api-access-g2v6f\") pod \"glance-operator-controller-manager-68b95954c9-g6kng\" (UID: \"abdbcac3-a654-4558-b71c-6b0a4d6c9c19\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.302717 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-tnpj6" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.307941 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.315246 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.342080 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.343335 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.351194 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-v7c65" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.352095 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.353863 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.355630 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.356681 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.361103 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-snvqv" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.364190 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.372054 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzhtm\" (UniqueName: \"kubernetes.io/projected/0b72b567-d244-4d7e-984b-ba42dfb7be25-kube-api-access-rzhtm\") pod \"infra-operator-controller-manager-d5cc86f4b-7g4lm\" (UID: \"0b72b567-d244-4d7e-984b-ba42dfb7be25\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.372132 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvhzz\" (UniqueName: \"kubernetes.io/projected/701735b8-41e5-47f9-9ac7-b8cfc0357597-kube-api-access-tvhzz\") pod \"horizon-operator-controller-manager-68c9694994-4h6lz\" (UID: \"701735b8-41e5-47f9-9ac7-b8cfc0357597\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.372187 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0b72b567-d244-4d7e-984b-ba42dfb7be25-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-7g4lm\" (UID: \"0b72b567-d244-4d7e-984b-ba42dfb7be25\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.372218 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjwhp\" (UniqueName: \"kubernetes.io/projected/156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8-kube-api-access-rjwhp\") pod \"heat-operator-controller-manager-774b86978c-q9q7l\" (UID: \"156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.376662 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.378536 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.381028 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.382418 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.391269 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-rfnww" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.398089 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.401892 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-ppplk" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.409467 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.410987 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.426059 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.427392 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.431218 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.436524 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-n57x7" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.442320 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-nqkww" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.442606 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvhzz\" (UniqueName: \"kubernetes.io/projected/701735b8-41e5-47f9-9ac7-b8cfc0357597-kube-api-access-tvhzz\") pod \"horizon-operator-controller-manager-68c9694994-4h6lz\" (UID: \"701735b8-41e5-47f9-9ac7-b8cfc0357597\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.444827 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjwhp\" (UniqueName: \"kubernetes.io/projected/156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8-kube-api-access-rjwhp\") pod \"heat-operator-controller-manager-774b86978c-q9q7l\" (UID: \"156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.452752 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.470920 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.479050 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qmxk\" (UniqueName: \"kubernetes.io/projected/28c76d9f-b15c-4b67-b616-88f89fea7eb7-kube-api-access-7qmxk\") pod \"keystone-operator-controller-manager-748dc6576f-gqfpz\" (UID: \"28c76d9f-b15c-4b67-b616-88f89fea7eb7\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.479106 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bblpw\" (UniqueName: \"kubernetes.io/projected/d1eae28c-1654-4cfc-a380-c56e52bdd2d5-kube-api-access-bblpw\") pod \"ironic-operator-controller-manager-5bfcdc958c-glgx8\" (UID: \"d1eae28c-1654-4cfc-a380-c56e52bdd2d5\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.479203 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzhtm\" (UniqueName: \"kubernetes.io/projected/0b72b567-d244-4d7e-984b-ba42dfb7be25-kube-api-access-rzhtm\") pod \"infra-operator-controller-manager-d5cc86f4b-7g4lm\" (UID: \"0b72b567-d244-4d7e-984b-ba42dfb7be25\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.479272 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bck54\" (UniqueName: \"kubernetes.io/projected/641bce51-c8fb-4956-a13d-e42d7204b3d2-kube-api-access-bck54\") pod \"manila-operator-controller-manager-58bb8d67cc-mj6sx\" (UID: \"641bce51-c8fb-4956-a13d-e42d7204b3d2\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.479331 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpplc\" (UniqueName: \"kubernetes.io/projected/2da1ddc9-a310-4a6a-90de-b63c8b33448a-kube-api-access-jpplc\") pod \"neutron-operator-controller-manager-7c57c8bbc4-vt2kw\" (UID: \"2da1ddc9-a310-4a6a-90de-b63c8b33448a\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.479356 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j69bb\" (UniqueName: \"kubernetes.io/projected/a7a7378a-ba96-4a74-9730-61c7e0215843-kube-api-access-j69bb\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-mwsrn\" (UID: \"a7a7378a-ba96-4a74-9730-61c7e0215843\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.479396 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0b72b567-d244-4d7e-984b-ba42dfb7be25-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-7g4lm\" (UID: \"0b72b567-d244-4d7e-984b-ba42dfb7be25\") " 
pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:38 crc kubenswrapper[5133]: E1121 14:00:38.479589 5133 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 21 14:00:38 crc kubenswrapper[5133]: E1121 14:00:38.479665 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b72b567-d244-4d7e-984b-ba42dfb7be25-cert podName:0b72b567-d244-4d7e-984b-ba42dfb7be25 nodeName:}" failed. No retries permitted until 2025-11-21 14:00:38.979638532 +0000 UTC m=+1098.777470780 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0b72b567-d244-4d7e-984b-ba42dfb7be25-cert") pod "infra-operator-controller-manager-d5cc86f4b-7g4lm" (UID: "0b72b567-d244-4d7e-984b-ba42dfb7be25") : secret "infra-operator-webhook-server-cert" not found Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.490668 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.530478 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzhtm\" (UniqueName: \"kubernetes.io/projected/0b72b567-d244-4d7e-984b-ba42dfb7be25-kube-api-access-rzhtm\") pod \"infra-operator-controller-manager-d5cc86f4b-7g4lm\" (UID: \"0b72b567-d244-4d7e-984b-ba42dfb7be25\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.581226 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qmxk\" (UniqueName: \"kubernetes.io/projected/28c76d9f-b15c-4b67-b616-88f89fea7eb7-kube-api-access-7qmxk\") pod \"keystone-operator-controller-manager-748dc6576f-gqfpz\" (UID: \"28c76d9f-b15c-4b67-b616-88f89fea7eb7\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.581481 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bblpw\" (UniqueName: \"kubernetes.io/projected/d1eae28c-1654-4cfc-a380-c56e52bdd2d5-kube-api-access-bblpw\") pod \"ironic-operator-controller-manager-5bfcdc958c-glgx8\" (UID: \"d1eae28c-1654-4cfc-a380-c56e52bdd2d5\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.581536 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6wh4\" (UniqueName: \"kubernetes.io/projected/2b5268ec-12ae-4aee-84f2-f176c3e8f1c3-kube-api-access-b6wh4\") pod \"nova-operator-controller-manager-79556f57fc-vpjk7\" (UID: \"2b5268ec-12ae-4aee-84f2-f176c3e8f1c3\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.581584 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bck54\" (UniqueName: \"kubernetes.io/projected/641bce51-c8fb-4956-a13d-e42d7204b3d2-kube-api-access-bck54\") pod \"manila-operator-controller-manager-58bb8d67cc-mj6sx\" (UID: \"641bce51-c8fb-4956-a13d-e42d7204b3d2\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.583163 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpplc\" (UniqueName: \"kubernetes.io/projected/2da1ddc9-a310-4a6a-90de-b63c8b33448a-kube-api-access-jpplc\") pod \"neutron-operator-controller-manager-7c57c8bbc4-vt2kw\" (UID: \"2da1ddc9-a310-4a6a-90de-b63c8b33448a\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.583194 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j69bb\" (UniqueName: \"kubernetes.io/projected/a7a7378a-ba96-4a74-9730-61c7e0215843-kube-api-access-j69bb\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-mwsrn\" (UID: \"a7a7378a-ba96-4a74-9730-61c7e0215843\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.583256 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb7db\" (UniqueName: \"kubernetes.io/projected/a076ae54-b994-453a-9361-2bf9acab8d2d-kube-api-access-cb7db\") pod \"octavia-operator-controller-manager-fd75fd47d-mxphc\" (UID: \"a076ae54-b994-453a-9361-2bf9acab8d2d\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.645276 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j69bb\" (UniqueName: \"kubernetes.io/projected/a7a7378a-ba96-4a74-9730-61c7e0215843-kube-api-access-j69bb\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-mwsrn\" (UID: \"a7a7378a-ba96-4a74-9730-61c7e0215843\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.656925 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.657245 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.658381 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.662721 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpplc\" (UniqueName: \"kubernetes.io/projected/2da1ddc9-a310-4a6a-90de-b63c8b33448a-kube-api-access-jpplc\") pod \"neutron-operator-controller-manager-7c57c8bbc4-vt2kw\" (UID: \"2da1ddc9-a310-4a6a-90de-b63c8b33448a\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.664329 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qmxk\" (UniqueName: \"kubernetes.io/projected/28c76d9f-b15c-4b67-b616-88f89fea7eb7-kube-api-access-7qmxk\") pod \"keystone-operator-controller-manager-748dc6576f-gqfpz\" (UID: \"28c76d9f-b15c-4b67-b616-88f89fea7eb7\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.665869 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.665980 5133 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.666262 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bblpw\" (UniqueName: \"kubernetes.io/projected/d1eae28c-1654-4cfc-a380-c56e52bdd2d5-kube-api-access-bblpw\") pod \"ironic-operator-controller-manager-5bfcdc958c-glgx8\" (UID: \"d1eae28c-1654-4cfc-a380-c56e52bdd2d5\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.666471 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.666537 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.671066 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-5hflr" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.673365 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-rssv2" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.673624 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.688159 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.694873 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6wh4\" (UniqueName: \"kubernetes.io/projected/2b5268ec-12ae-4aee-84f2-f176c3e8f1c3-kube-api-access-b6wh4\") pod \"nova-operator-controller-manager-79556f57fc-vpjk7\" (UID: \"2b5268ec-12ae-4aee-84f2-f176c3e8f1c3\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.695009 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb7db\" (UniqueName: \"kubernetes.io/projected/a076ae54-b994-453a-9361-2bf9acab8d2d-kube-api-access-cb7db\") pod \"octavia-operator-controller-manager-fd75fd47d-mxphc\" (UID: \"a076ae54-b994-453a-9361-2bf9acab8d2d\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.701647 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.708826 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bck54\" (UniqueName: \"kubernetes.io/projected/641bce51-c8fb-4956-a13d-e42d7204b3d2-kube-api-access-bck54\") pod \"manila-operator-controller-manager-58bb8d67cc-mj6sx\" (UID: \"641bce51-c8fb-4956-a13d-e42d7204b3d2\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.709412 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.712428 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-t2t9l" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.727628 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.741780 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.743544 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.774800 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.775243 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb7db\" (UniqueName: \"kubernetes.io/projected/a076ae54-b994-453a-9361-2bf9acab8d2d-kube-api-access-cb7db\") pod \"octavia-operator-controller-manager-fd75fd47d-mxphc\" (UID: \"a076ae54-b994-453a-9361-2bf9acab8d2d\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.775792 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6wh4\" (UniqueName: \"kubernetes.io/projected/2b5268ec-12ae-4aee-84f2-f176c3e8f1c3-kube-api-access-b6wh4\") pod \"nova-operator-controller-manager-79556f57fc-vpjk7\" (UID: \"2b5268ec-12ae-4aee-84f2-f176c3e8f1c3\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.780189 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-pthmp" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.796097 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.818286 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.820551 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.823880 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-zkhbx" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.841630 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njvlq\" (UniqueName: \"kubernetes.io/projected/71d8bd8e-380d-458b-93b8-7e1a68964294-kube-api-access-njvlq\") pod \"ovn-operator-controller-manager-66cf5c67ff-tzn9t\" (UID: \"71d8bd8e-380d-458b-93b8-7e1a68964294\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.847145 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5z85m\" (UID: \"2036671e-8670-48c3-af60-5eee8087efa7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.847406 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5chmq\" (UniqueName: \"kubernetes.io/projected/3561dc03-64fa-4b51-b14c-02ef7dc87280-kube-api-access-5chmq\") pod \"placement-operator-controller-manager-5db546f9d9-kkk8d\" (UID: \"3561dc03-64fa-4b51-b14c-02ef7dc87280\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.848300 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x99rz\" (UniqueName: \"kubernetes.io/projected/2036671e-8670-48c3-af60-5eee8087efa7-kube-api-access-x99rz\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5z85m\" (UID: \"2036671e-8670-48c3-af60-5eee8087efa7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.858751 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.867152 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.876280 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.911085 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.954230 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.959670 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njvlq\" (UniqueName: \"kubernetes.io/projected/71d8bd8e-380d-458b-93b8-7e1a68964294-kube-api-access-njvlq\") pod \"ovn-operator-controller-manager-66cf5c67ff-tzn9t\" (UID: \"71d8bd8e-380d-458b-93b8-7e1a68964294\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.959711 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5z85m\" (UID: \"2036671e-8670-48c3-af60-5eee8087efa7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.959755 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpl42\" (UniqueName: \"kubernetes.io/projected/167387d2-835f-4d70-8a59-de71037b8178-kube-api-access-vpl42\") pod \"swift-operator-controller-manager-6fdc4fcf86-vpq2d\" (UID: \"167387d2-835f-4d70-8a59-de71037b8178\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.959787 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5chmq\" (UniqueName: \"kubernetes.io/projected/3561dc03-64fa-4b51-b14c-02ef7dc87280-kube-api-access-5chmq\") pod \"placement-operator-controller-manager-5db546f9d9-kkk8d\" (UID: \"3561dc03-64fa-4b51-b14c-02ef7dc87280\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.959853 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlzz5\" (UniqueName: \"kubernetes.io/projected/6f45172d-e807-4bb7-b836-a2a6a53beccf-kube-api-access-jlzz5\") pod \"telemetry-operator-controller-manager-567f98c9d-772lb\" (UID: \"6f45172d-e807-4bb7-b836-a2a6a53beccf\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.959913 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x99rz\" (UniqueName: \"kubernetes.io/projected/2036671e-8670-48c3-af60-5eee8087efa7-kube-api-access-x99rz\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5z85m\" (UID: \"2036671e-8670-48c3-af60-5eee8087efa7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:38 crc kubenswrapper[5133]: E1121 14:00:38.960506 5133 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 14:00:38 crc kubenswrapper[5133]: E1121 14:00:38.960572 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert podName:2036671e-8670-48c3-af60-5eee8087efa7 nodeName:}" failed. No retries permitted until 2025-11-21 14:00:39.460551904 +0000 UTC m=+1099.258384152 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" (UID: "2036671e-8670-48c3-af60-5eee8087efa7") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.960835 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.973284 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-448nh"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.987750 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njvlq\" (UniqueName: \"kubernetes.io/projected/71d8bd8e-380d-458b-93b8-7e1a68964294-kube-api-access-njvlq\") pod \"ovn-operator-controller-manager-66cf5c67ff-tzn9t\" (UID: \"71d8bd8e-380d-458b-93b8-7e1a68964294\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.990321 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-448nh"] Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.990473 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.995907 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x99rz\" (UniqueName: \"kubernetes.io/projected/2036671e-8670-48c3-af60-5eee8087efa7-kube-api-access-x99rz\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5z85m\" (UID: \"2036671e-8670-48c3-af60-5eee8087efa7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:38 crc kubenswrapper[5133]: I1121 14:00:38.999634 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-bpkzp" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.013146 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5chmq\" (UniqueName: \"kubernetes.io/projected/3561dc03-64fa-4b51-b14c-02ef7dc87280-kube-api-access-5chmq\") pod \"placement-operator-controller-manager-5db546f9d9-kkk8d\" (UID: \"3561dc03-64fa-4b51-b14c-02ef7dc87280\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.024380 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-g2fdl"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.032354 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.035297 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-tnvbf" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.063928 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5pnfk\" (UniqueName: \"kubernetes.io/projected/f2e66c49-bdd0-408a-867b-6e4c869df2a7-kube-api-access-5pnfk\") pod \"watcher-operator-controller-manager-864885998-g2fdl\" (UID: \"f2e66c49-bdd0-408a-867b-6e4c869df2a7\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.064012 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p9cx\" (UniqueName: \"kubernetes.io/projected/d8e037c0-d4af-4d74-9e8f-e749db78b81d-kube-api-access-4p9cx\") pod \"test-operator-controller-manager-5cb74df96-448nh\" (UID: \"d8e037c0-d4af-4d74-9e8f-e749db78b81d\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.064048 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpl42\" (UniqueName: \"kubernetes.io/projected/167387d2-835f-4d70-8a59-de71037b8178-kube-api-access-vpl42\") pod \"swift-operator-controller-manager-6fdc4fcf86-vpq2d\" (UID: \"167387d2-835f-4d70-8a59-de71037b8178\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.064092 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlzz5\" (UniqueName: \"kubernetes.io/projected/6f45172d-e807-4bb7-b836-a2a6a53beccf-kube-api-access-jlzz5\") pod \"telemetry-operator-controller-manager-567f98c9d-772lb\" (UID: \"6f45172d-e807-4bb7-b836-a2a6a53beccf\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.064743 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0b72b567-d244-4d7e-984b-ba42dfb7be25-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-7g4lm\" (UID: \"0b72b567-d244-4d7e-984b-ba42dfb7be25\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.064862 5133 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.064910 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b72b567-d244-4d7e-984b-ba42dfb7be25-cert podName:0b72b567-d244-4d7e-984b-ba42dfb7be25 nodeName:}" failed. No retries permitted until 2025-11-21 14:00:40.064892775 +0000 UTC m=+1099.862725023 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0b72b567-d244-4d7e-984b-ba42dfb7be25-cert") pod "infra-operator-controller-manager-d5cc86f4b-7g4lm" (UID: "0b72b567-d244-4d7e-984b-ba42dfb7be25") : secret "infra-operator-webhook-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.066985 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-g2fdl"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.088443 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.111543 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlzz5\" (UniqueName: \"kubernetes.io/projected/6f45172d-e807-4bb7-b836-a2a6a53beccf-kube-api-access-jlzz5\") pod \"telemetry-operator-controller-manager-567f98c9d-772lb\" (UID: \"6f45172d-e807-4bb7-b836-a2a6a53beccf\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.126402 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpl42\" (UniqueName: \"kubernetes.io/projected/167387d2-835f-4d70-8a59-de71037b8178-kube-api-access-vpl42\") pod \"swift-operator-controller-manager-6fdc4fcf86-vpq2d\" (UID: \"167387d2-835f-4d70-8a59-de71037b8178\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.140897 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.145598 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.168248 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5pnfk\" (UniqueName: \"kubernetes.io/projected/f2e66c49-bdd0-408a-867b-6e4c869df2a7-kube-api-access-5pnfk\") pod \"watcher-operator-controller-manager-864885998-g2fdl\" (UID: \"f2e66c49-bdd0-408a-867b-6e4c869df2a7\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.168352 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p9cx\" (UniqueName: \"kubernetes.io/projected/d8e037c0-d4af-4d74-9e8f-e749db78b81d-kube-api-access-4p9cx\") pod \"test-operator-controller-manager-5cb74df96-448nh\" (UID: \"d8e037c0-d4af-4d74-9e8f-e749db78b81d\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.188104 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.189294 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.191636 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.192585 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.194471 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-4f8gr" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.194718 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.194946 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p9cx\" (UniqueName: \"kubernetes.io/projected/d8e037c0-d4af-4d74-9e8f-e749db78b81d-kube-api-access-4p9cx\") pod \"test-operator-controller-manager-5cb74df96-448nh\" (UID: \"d8e037c0-d4af-4d74-9e8f-e749db78b81d\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.202840 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5pnfk\" (UniqueName: \"kubernetes.io/projected/f2e66c49-bdd0-408a-867b-6e4c869df2a7-kube-api-access-5pnfk\") pod \"watcher-operator-controller-manager-864885998-g2fdl\" (UID: \"f2e66c49-bdd0-408a-867b-6e4c869df2a7\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.222856 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.269772 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-metrics-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.269833 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-webhook-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.269901 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tj4z\" (UniqueName: \"kubernetes.io/projected/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-kube-api-access-9tj4z\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.313209 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.314873 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.330546 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-xcj5p" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.344783 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.364528 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.366915 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.370120 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.377056 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-metrics-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.377118 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-webhook-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.377210 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tj4z\" (UniqueName: \"kubernetes.io/projected/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-kube-api-access-9tj4z\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.377281 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgz7d\" (UniqueName: \"kubernetes.io/projected/167b5c97-691f-4078-ac5f-e849a162b136-kube-api-access-lgz7d\") pod \"rabbitmq-cluster-operator-manager-668c99d594-lfntj\" (UID: \"167b5c97-691f-4078-ac5f-e849a162b136\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.377462 5133 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.377530 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-metrics-certs podName:1993c427-2d4f-4f20-9390-e7d67d4a6fe2 nodeName:}" failed. No retries permitted until 2025-11-21 14:00:39.877510145 +0000 UTC m=+1099.675342393 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-metrics-certs") pod "openstack-operator-controller-manager-679bdf8cc4-fsfb6" (UID: "1993c427-2d4f-4f20-9390-e7d67d4a6fe2") : secret "metrics-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.377795 5133 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.377818 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-webhook-certs podName:1993c427-2d4f-4f20-9390-e7d67d4a6fe2 nodeName:}" failed. No retries permitted until 2025-11-21 14:00:39.877811273 +0000 UTC m=+1099.675643521 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-webhook-certs") pod "openstack-operator-controller-manager-679bdf8cc4-fsfb6" (UID: "1993c427-2d4f-4f20-9390-e7d67d4a6fe2") : secret "webhook-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.424801 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tj4z\" (UniqueName: \"kubernetes.io/projected/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-kube-api-access-9tj4z\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.435944 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.478461 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgz7d\" (UniqueName: \"kubernetes.io/projected/167b5c97-691f-4078-ac5f-e849a162b136-kube-api-access-lgz7d\") pod \"rabbitmq-cluster-operator-manager-668c99d594-lfntj\" (UID: \"167b5c97-691f-4078-ac5f-e849a162b136\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.478616 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5z85m\" (UID: \"2036671e-8670-48c3-af60-5eee8087efa7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.478745 5133 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.478803 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert podName:2036671e-8670-48c3-af60-5eee8087efa7 nodeName:}" failed. No retries permitted until 2025-11-21 14:00:40.478789014 +0000 UTC m=+1100.276621262 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" (UID: "2036671e-8670-48c3-af60-5eee8087efa7") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.499460 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgz7d\" (UniqueName: \"kubernetes.io/projected/167b5c97-691f-4078-ac5f-e849a162b136-kube-api-access-lgz7d\") pod \"rabbitmq-cluster-operator-manager-668c99d594-lfntj\" (UID: \"167b5c97-691f-4078-ac5f-e849a162b136\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.524435 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.824133 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.874065 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.892484 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-metrics-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.892536 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-webhook-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.892712 5133 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.892775 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-webhook-certs podName:1993c427-2d4f-4f20-9390-e7d67d4a6fe2 nodeName:}" failed. No retries permitted until 2025-11-21 14:00:40.892754536 +0000 UTC m=+1100.690586784 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-webhook-certs") pod "openstack-operator-controller-manager-679bdf8cc4-fsfb6" (UID: "1993c427-2d4f-4f20-9390-e7d67d4a6fe2") : secret "webhook-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.893649 5133 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: E1121 14:00:39.893679 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-metrics-certs podName:1993c427-2d4f-4f20-9390-e7d67d4a6fe2 nodeName:}" failed. No retries permitted until 2025-11-21 14:00:40.89367067 +0000 UTC m=+1100.691502918 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-metrics-certs") pod "openstack-operator-controller-manager-679bdf8cc4-fsfb6" (UID: "1993c427-2d4f-4f20-9390-e7d67d4a6fe2") : secret "metrics-server-cert" not found Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.902615 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p"] Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.941281 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" event={"ID":"156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8","Type":"ContainerStarted","Data":"3bbb3640b703873e14aca5a3162ec5ab75731dba32843f4f8165e888f56a3dfe"} Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.942838 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" event={"ID":"50fccca4-2734-4839-83b0-d220b0dfa1d6","Type":"ContainerStarted","Data":"099574421fc013037aa1cef6fd3087ea4a84e5f2a8624864232f261fce7cee45"} Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.944215 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" event={"ID":"701735b8-41e5-47f9-9ac7-b8cfc0357597","Type":"ContainerStarted","Data":"38a4ff9f67c47797599aa1c2a99de334fc9fd5dd585e756d405e4b3099b32ff2"} Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.947618 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" event={"ID":"fbcf38a1-6730-49b5-bf19-9966465f2d1b","Type":"ContainerStarted","Data":"88c01f028cf6e414b5c19c95b247e58cd3d52c76bcadd32419db14358429b5e6"} Nov 21 14:00:39 crc kubenswrapper[5133]: I1121 14:00:39.948765 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" event={"ID":"99e612e9-ba8d-41cd-9654-4332a4132c4f","Type":"ContainerStarted","Data":"8df48351cefd605711bf81e37439fa2464096ea3c166c0505f30caad0d5b7f67"} Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.096311 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0b72b567-d244-4d7e-984b-ba42dfb7be25-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-7g4lm\" (UID: \"0b72b567-d244-4d7e-984b-ba42dfb7be25\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 
14:00:40.119046 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0b72b567-d244-4d7e-984b-ba42dfb7be25-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-7g4lm\" (UID: \"0b72b567-d244-4d7e-984b-ba42dfb7be25\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.369814 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.378974 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.410251 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.431468 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.520241 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.520309 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.520326 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.520342 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.520357 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.521205 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5z85m\" (UID: \"2036671e-8670-48c3-af60-5eee8087efa7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.521374 5133 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.521469 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert podName:2036671e-8670-48c3-af60-5eee8087efa7 nodeName:}" failed. No retries permitted until 2025-11-21 14:00:42.52144392 +0000 UTC m=+1102.319276168 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert") pod "openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" (UID: "2036671e-8670-48c3-af60-5eee8087efa7") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.528973 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz"] Nov 21 14:00:40 crc kubenswrapper[5133]: W1121 14:00:40.531424 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2da1ddc9_a310_4a6a_90de_b63c8b33448a.slice/crio-88c7c96db2b70eb3c337230904a87f351433e72b4519e3cb08229271b4a059b6 WatchSource:0}: Error finding container 88c7c96db2b70eb3c337230904a87f351433e72b4519e3cb08229271b4a059b6: Status 404 returned error can't find the container with id 88c7c96db2b70eb3c337230904a87f351433e72b4519e3cb08229271b4a059b6 Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.536098 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.540920 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.544513 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-g2fdl"] Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.546052 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5pnfk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-g2fdl_openstack-operators(f2e66c49-bdd0-408a-867b-6e4c869df2a7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.560265 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-448nh"] Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.560327 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d"] Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.560497 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5pnfk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-g2fdl_openstack-operators(f2e66c49-bdd0-408a-867b-6e4c869df2a7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.561798 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vpl42,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-vpq2d_openstack-operators(167387d2-835f-4d70-8a59-de71037b8178): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.561949 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jpplc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-7c57c8bbc4-vt2kw_openstack-operators(2da1ddc9-a310-4a6a-90de-b63c8b33448a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.564110 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t"] Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.564225 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" podUID="f2e66c49-bdd0-408a-867b-6e4c869df2a7" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.565236 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vpl42,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
swift-operator-controller-manager-6fdc4fcf86-vpq2d_openstack-operators(167387d2-835f-4d70-8a59-de71037b8178): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.565664 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jpplc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-7c57c8bbc4-vt2kw_openstack-operators(2da1ddc9-a310-4a6a-90de-b63c8b33448a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.567067 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" podUID="2da1ddc9-a310-4a6a-90de-b63c8b33448a" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.567149 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" podUID="167387d2-835f-4d70-8a59-de71037b8178" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.576454 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: 
{{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4p9cx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-448nh_openstack-operators(d8e037c0-d4af-4d74-9e8f-e749db78b81d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.584508 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-njvlq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-tzn9t_openstack-operators(71d8bd8e-380d-458b-93b8-7e1a68964294): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.584628 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4p9cx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-448nh_openstack-operators(d8e037c0-d4af-4d74-9e8f-e749db78b81d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.586629 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" podUID="d8e037c0-d4af-4d74-9e8f-e749db78b81d" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.588433 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-njvlq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-66cf5c67ff-tzn9t_openstack-operators(71d8bd8e-380d-458b-93b8-7e1a68964294): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.589554 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" podUID="71d8bd8e-380d-458b-93b8-7e1a68964294" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.926131 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-metrics-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.926554 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-webhook-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.932771 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-metrics-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.947769 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1993c427-2d4f-4f20-9390-e7d67d4a6fe2-webhook-certs\") pod \"openstack-operator-controller-manager-679bdf8cc4-fsfb6\" (UID: \"1993c427-2d4f-4f20-9390-e7d67d4a6fe2\") " 
pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.975733 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" event={"ID":"d8e037c0-d4af-4d74-9e8f-e749db78b81d","Type":"ContainerStarted","Data":"6ec5450c8087718bd5ac8091bdea59547ff52b208cec84b1eb354ebcedbe05d5"} Nov 21 14:00:40 crc kubenswrapper[5133]: E1121 14:00:40.988394 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" podUID="d8e037c0-d4af-4d74-9e8f-e749db78b81d" Nov 21 14:00:40 crc kubenswrapper[5133]: I1121 14:00:40.995225 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" event={"ID":"a076ae54-b994-453a-9361-2bf9acab8d2d","Type":"ContainerStarted","Data":"cc76ccdb0c99237e7877b85baf7b548805767856cf0e1fad91b1bd0ff9d52074"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.017317 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" event={"ID":"2da1ddc9-a310-4a6a-90de-b63c8b33448a","Type":"ContainerStarted","Data":"88c7c96db2b70eb3c337230904a87f351433e72b4519e3cb08229271b4a059b6"} Nov 21 14:00:41 crc kubenswrapper[5133]: E1121 14:00:41.024410 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" podUID="2da1ddc9-a310-4a6a-90de-b63c8b33448a" Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.034659 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" event={"ID":"28c76d9f-b15c-4b67-b616-88f89fea7eb7","Type":"ContainerStarted","Data":"67c7052df7338dd56c29fcf65e6ab7dd84cb65ff2274e88ed15126060f4a1779"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.036400 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" event={"ID":"abdbcac3-a654-4558-b71c-6b0a4d6c9c19","Type":"ContainerStarted","Data":"582add583f11f9969f7955252be1e52b2de12f8022dae1f6b8689588ba3caaca"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.051808 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" event={"ID":"6f45172d-e807-4bb7-b836-a2a6a53beccf","Type":"ContainerStarted","Data":"ea21e11af85a041e3db58b2f4aa3f33e83365d1931512eb13126699ff10bcd44"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.070770 5133 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm"] Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.083628 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" event={"ID":"f2e66c49-bdd0-408a-867b-6e4c869df2a7","Type":"ContainerStarted","Data":"701e40734759f79b30e0bcdf087d0412dc23227104cefd20baa1da58eed171bf"} Nov 21 14:00:41 crc kubenswrapper[5133]: E1121 14:00:41.095702 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" podUID="f2e66c49-bdd0-408a-867b-6e4c869df2a7" Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.100322 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" event={"ID":"167b5c97-691f-4078-ac5f-e849a162b136","Type":"ContainerStarted","Data":"4b341e13ae105866f56bd05bad2dec60045925a584d9e2b2ba20a7610494c419"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.104224 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.108406 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" event={"ID":"3561dc03-64fa-4b51-b14c-02ef7dc87280","Type":"ContainerStarted","Data":"b4a4d51ee7b90bd434d729611cb5db79b55f630369a5c55e21d35c1e0177d449"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.125663 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" event={"ID":"d1eae28c-1654-4cfc-a380-c56e52bdd2d5","Type":"ContainerStarted","Data":"3a6ade7f9854b5ec03e31d4cc85f3ea4c874fd0aeb18fe8fcc5a35701354b29a"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.166525 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" event={"ID":"2b5268ec-12ae-4aee-84f2-f176c3e8f1c3","Type":"ContainerStarted","Data":"9c0dd7ce7cb5ffa76fdf0a55a2d3fab81f6a3b6d785e11d4e0d40fa594345557"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.200691 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" event={"ID":"71d8bd8e-380d-458b-93b8-7e1a68964294","Type":"ContainerStarted","Data":"d7acafac6b9c0ecd000367f44508596a58c0d433714c53df829a5b160c8b0077"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.209789 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" event={"ID":"a7a7378a-ba96-4a74-9730-61c7e0215843","Type":"ContainerStarted","Data":"22f437dfafaaae342f321477fc4f0626974911f6551e8d9c5f77b25ed2d54ab5"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.211285 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" event={"ID":"641bce51-c8fb-4956-a13d-e42d7204b3d2","Type":"ContainerStarted","Data":"34a24713cbe2fbf8e0d4a4a160c90eae55b1065a59d5cda85cbd54b462ef5c68"} Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.212280 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" event={"ID":"167387d2-835f-4d70-8a59-de71037b8178","Type":"ContainerStarted","Data":"a3561c721267729fff54c3cd22f8b3edbedf214e1d5bee5cc4d01d4c5c4e8497"} Nov 21 14:00:41 crc kubenswrapper[5133]: E1121 14:00:41.214074 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" podUID="167387d2-835f-4d70-8a59-de71037b8178" Nov 21 14:00:41 crc kubenswrapper[5133]: E1121 14:00:41.214158 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" podUID="71d8bd8e-380d-458b-93b8-7e1a68964294" Nov 21 14:00:41 crc kubenswrapper[5133]: I1121 14:00:41.629782 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6"] Nov 21 14:00:41 crc kubenswrapper[5133]: W1121 14:00:41.638391 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1993c427_2d4f_4f20_9390_e7d67d4a6fe2.slice/crio-0423f06621d7864e912c7ade5b7b82897470ce30b663d8dbacf2b8c864ae324c WatchSource:0}: Error finding container 0423f06621d7864e912c7ade5b7b82897470ce30b663d8dbacf2b8c864ae324c: Status 404 returned error can't find the container with id 0423f06621d7864e912c7ade5b7b82897470ce30b663d8dbacf2b8c864ae324c Nov 21 14:00:42 crc kubenswrapper[5133]: I1121 14:00:42.240212 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" event={"ID":"1993c427-2d4f-4f20-9390-e7d67d4a6fe2","Type":"ContainerStarted","Data":"57383e0ad64357bd1ae10c3a154b149b0855911d16fcc8764a5b0e045035db14"} Nov 21 14:00:42 crc kubenswrapper[5133]: I1121 14:00:42.240803 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" event={"ID":"1993c427-2d4f-4f20-9390-e7d67d4a6fe2","Type":"ContainerStarted","Data":"0423f06621d7864e912c7ade5b7b82897470ce30b663d8dbacf2b8c864ae324c"} Nov 21 14:00:42 crc kubenswrapper[5133]: I1121 14:00:42.240884 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:42 crc kubenswrapper[5133]: I1121 
14:00:42.251175 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" event={"ID":"0b72b567-d244-4d7e-984b-ba42dfb7be25","Type":"ContainerStarted","Data":"17289e93075e35a4627765edacf62efbef16f853474f16d5c7ea4523dcf15019"} Nov 21 14:00:42 crc kubenswrapper[5133]: E1121 14:00:42.257200 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" podUID="167387d2-835f-4d70-8a59-de71037b8178" Nov 21 14:00:42 crc kubenswrapper[5133]: E1121 14:00:42.258890 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" podUID="f2e66c49-bdd0-408a-867b-6e4c869df2a7" Nov 21 14:00:42 crc kubenswrapper[5133]: E1121 14:00:42.259309 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" podUID="d8e037c0-d4af-4d74-9e8f-e749db78b81d" Nov 21 14:00:42 crc kubenswrapper[5133]: E1121 14:00:42.259863 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" podUID="71d8bd8e-380d-458b-93b8-7e1a68964294" Nov 21 14:00:42 crc kubenswrapper[5133]: E1121 14:00:42.259922 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" podUID="2da1ddc9-a310-4a6a-90de-b63c8b33448a" Nov 21 14:00:42 crc kubenswrapper[5133]: I1121 
14:00:42.378832 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" podStartSLOduration=4.378804266 podStartE2EDuration="4.378804266s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:00:42.325324936 +0000 UTC m=+1102.123157184" watchObservedRunningTime="2025-11-21 14:00:42.378804266 +0000 UTC m=+1102.176636514" Nov 21 14:00:42 crc kubenswrapper[5133]: I1121 14:00:42.575175 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5z85m\" (UID: \"2036671e-8670-48c3-af60-5eee8087efa7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:42 crc kubenswrapper[5133]: I1121 14:00:42.585239 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/2036671e-8670-48c3-af60-5eee8087efa7-cert\") pod \"openstack-baremetal-operator-controller-manager-544b9bb9-5z85m\" (UID: \"2036671e-8670-48c3-af60-5eee8087efa7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:42 crc kubenswrapper[5133]: I1121 14:00:42.625315 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:00:43 crc kubenswrapper[5133]: I1121 14:00:43.021554 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m"] Nov 21 14:00:43 crc kubenswrapper[5133]: I1121 14:00:43.277824 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" event={"ID":"2036671e-8670-48c3-af60-5eee8087efa7","Type":"ContainerStarted","Data":"42ccfc73271eb834c04849c0a1f9fcb01142ce4231066de34bc058073388ea57"} Nov 21 14:00:51 crc kubenswrapper[5133]: I1121 14:00:51.112788 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-679bdf8cc4-fsfb6" Nov 21 14:00:53 crc kubenswrapper[5133]: I1121 14:00:53.463708 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:00:54 crc kubenswrapper[5133]: E1121 14:00:54.898177 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7" Nov 21 14:00:54 crc kubenswrapper[5133]: E1121 14:00:54.898792 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b6wh4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-vpjk7_openstack-operators(2b5268ec-12ae-4aee-84f2-f176c3e8f1c3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:00:55 crc kubenswrapper[5133]: E1121 14:00:55.555578 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991" Nov 21 14:00:55 crc kubenswrapper[5133]: E1121 14:00:55.555954 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g2v6f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-68b95954c9-g6kng_openstack-operators(abdbcac3-a654-4558-b71c-6b0a4d6c9c19): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:00:56 crc kubenswrapper[5133]: E1121 14:00:56.540694 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:b582189b55fddc180a6d468c9dba7078009a693db37b4093d4ba0c99ec675377" Nov 21 14:00:56 crc kubenswrapper[5133]: E1121 14:00:56.540912 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:b582189b55fddc180a6d468c9dba7078009a693db37b4093d4ba0c99ec675377,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bblpw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-5bfcdc958c-glgx8_openstack-operators(d1eae28c-1654-4cfc-a380-c56e52bdd2d5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:00:56 crc kubenswrapper[5133]: E1121 14:00:56.961906 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13" Nov 21 14:00:56 crc kubenswrapper[5133]: E1121 14:00:56.962122 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cb7db,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-fd75fd47d-mxphc_openstack-operators(a076ae54-b994-453a-9361-2bf9acab8d2d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:00:57 crc kubenswrapper[5133]: E1121 14:00:57.429819 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04" Nov 21 14:00:57 crc kubenswrapper[5133]: E1121 14:00:57.430290 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j69bb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-cb6c4fdb7-mwsrn_openstack-operators(a7a7378a-ba96-4a74-9730-61c7e0215843): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:01:05 crc kubenswrapper[5133]: E1121 14:01:05.583676 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a" Nov 21 14:01:05 crc kubenswrapper[5133]: E1121 14:01:05.584587 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7qmxk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-748dc6576f-gqfpz_openstack-operators(28c76d9f-b15c-4b67-b616-88f89fea7eb7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:01:06 crc kubenswrapper[5133]: E1121 14:01:06.054589 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd" Nov 21 14:01:06 crc kubenswrapper[5133]: E1121 14:01:06.055205 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEI
LOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{N
ame:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MAN
ILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IM
AGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x99rz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-544b9bb9-5z85m_openstack-operators(2036671e-8670-48c3-af60-5eee8087efa7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:01:06 crc kubenswrapper[5133]: E1121 14:01:06.227857 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.2:5001/openstack-k8s-operators/cinder-operator:5c079ca6292ab98f866ec1d60dc6d1dfbd27072c" Nov 21 14:01:06 crc kubenswrapper[5133]: E1121 14:01:06.227954 5133 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.2:5001/openstack-k8s-operators/cinder-operator:5c079ca6292ab98f866ec1d60dc6d1dfbd27072c" Nov 21 14:01:06 crc kubenswrapper[5133]: E1121 14:01:06.228199 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.2:5001/openstack-k8s-operators/cinder-operator:5c079ca6292ab98f866ec1d60dc6d1dfbd27072c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wh692,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-5c575964b7-fcp4p_openstack-operators(50fccca4-2734-4839-83b0-d220b0dfa1d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:01:07 crc kubenswrapper[5133]: E1121 14:01:07.266254 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 21 14:01:07 crc kubenswrapper[5133]: E1121 14:01:07.266525 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lgz7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-lfntj_openstack-operators(167b5c97-691f-4078-ac5f-e849a162b136): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Nov 21 14:01:07 crc kubenswrapper[5133]: E1121 14:01:07.267776 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" podUID="167b5c97-691f-4078-ac5f-e849a162b136" Nov 21 14:01:07 crc kubenswrapper[5133]: E1121 14:01:07.498701 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" podUID="167b5c97-691f-4078-ac5f-e849a162b136" Nov 21 14:01:11 crc kubenswrapper[5133]: I1121 14:01:11.574383 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" event={"ID":"3561dc03-64fa-4b51-b14c-02ef7dc87280","Type":"ContainerStarted","Data":"d2435d09c9aab2a2ad40a79a26ad480f6d0ac446380feefc7efbbf11074af131"} Nov 21 14:01:11 crc kubenswrapper[5133]: I1121 14:01:11.579053 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" event={"ID":"156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8","Type":"ContainerStarted","Data":"50779b2fcb38af2076ab0a538a38927e4d020b7cbe215afc479b6aa286d0ca99"} Nov 21 14:01:11 crc kubenswrapper[5133]: I1121 14:01:11.589877 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" event={"ID":"0b72b567-d244-4d7e-984b-ba42dfb7be25","Type":"ContainerStarted","Data":"edc694ab0d85c4313e6d2eab79b7506d9ab715a96c3b5f095de41c5d11c8998d"} Nov 21 14:01:11 crc kubenswrapper[5133]: I1121 14:01:11.605120 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" event={"ID":"6f45172d-e807-4bb7-b836-a2a6a53beccf","Type":"ContainerStarted","Data":"d8e7575c7d562c292f023e6846e168b6da691388a80de09fe9e9048d62bce40e"} Nov 21 14:01:11 crc kubenswrapper[5133]: I1121 14:01:11.646473 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" event={"ID":"641bce51-c8fb-4956-a13d-e42d7204b3d2","Type":"ContainerStarted","Data":"eaadb0b677784802af01c300e4d862fed412063955189a1bbf3d0b740aec8a01"} Nov 21 14:01:11 crc kubenswrapper[5133]: I1121 14:01:11.650608 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" event={"ID":"701735b8-41e5-47f9-9ac7-b8cfc0357597","Type":"ContainerStarted","Data":"a134a7a54b645df260174c3c3a9acf73e44ae6174f0cf6aa1b8c506622725cf2"} Nov 21 14:01:11 crc kubenswrapper[5133]: I1121 14:01:11.652837 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" event={"ID":"fbcf38a1-6730-49b5-bf19-9966465f2d1b","Type":"ContainerStarted","Data":"808aa65746061d624123ce6a566e5b121050a5135f591c007fb05208229f4448"} Nov 21 14:01:11 crc kubenswrapper[5133]: I1121 14:01:11.664838 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" 
event={"ID":"d8e037c0-d4af-4d74-9e8f-e749db78b81d","Type":"ContainerStarted","Data":"ed870b644072bf2fa2413e0b5652c446c2d69033a7974a8645ddaa6c34918320"} Nov 21 14:01:11 crc kubenswrapper[5133]: I1121 14:01:11.678348 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" event={"ID":"99e612e9-ba8d-41cd-9654-4332a4132c4f","Type":"ContainerStarted","Data":"ff5ce27bbd10685a50592b34a7abf5e692d764503938533bcaff9af26558afe7"} Nov 21 14:01:12 crc kubenswrapper[5133]: I1121 14:01:12.690258 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" event={"ID":"71d8bd8e-380d-458b-93b8-7e1a68964294","Type":"ContainerStarted","Data":"e373873a8f58e2f2bdea5490889afb277a79b0c9f70c74c5ed2ec3ed1b293e5b"} Nov 21 14:01:12 crc kubenswrapper[5133]: I1121 14:01:12.692264 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" event={"ID":"2da1ddc9-a310-4a6a-90de-b63c8b33448a","Type":"ContainerStarted","Data":"f4de011c40f99294bfa3f82a08fef98e6176bf75c3ce50ab3f1c28701c18a85b"} Nov 21 14:01:12 crc kubenswrapper[5133]: I1121 14:01:12.694506 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" event={"ID":"167387d2-835f-4d70-8a59-de71037b8178","Type":"ContainerStarted","Data":"99aa367f3d4bd56e50bf554ddfc27a6bb30cddcfbaf4556942aa43739df4f80e"} Nov 21 14:01:13 crc kubenswrapper[5133]: E1121 14:01:13.126058 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" podUID="abdbcac3-a654-4558-b71c-6b0a4d6c9c19" Nov 21 14:01:13 crc kubenswrapper[5133]: E1121 14:01:13.167518 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" podUID="2036671e-8670-48c3-af60-5eee8087efa7" Nov 21 14:01:13 crc kubenswrapper[5133]: E1121 14:01:13.283357 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" podUID="d1eae28c-1654-4cfc-a380-c56e52bdd2d5" Nov 21 14:01:13 crc kubenswrapper[5133]: E1121 14:01:13.451306 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" podUID="a7a7378a-ba96-4a74-9730-61c7e0215843" Nov 21 14:01:13 crc kubenswrapper[5133]: E1121 14:01:13.490893 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" podUID="50fccca4-2734-4839-83b0-d220b0dfa1d6" Nov 21 14:01:13 crc kubenswrapper[5133]: E1121 14:01:13.493650 5133 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" podUID="a076ae54-b994-453a-9361-2bf9acab8d2d" Nov 21 14:01:13 crc kubenswrapper[5133]: E1121 14:01:13.693958 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" podUID="2b5268ec-12ae-4aee-84f2-f176c3e8f1c3" Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.721687 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" event={"ID":"50fccca4-2734-4839-83b0-d220b0dfa1d6","Type":"ContainerStarted","Data":"61f262d144652805c38307885f3b7293f69dbf92c50da40f7b44a92d15569e28"} Nov 21 14:01:13 crc kubenswrapper[5133]: E1121 14:01:13.741560 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.2:5001/openstack-k8s-operators/cinder-operator:5c079ca6292ab98f866ec1d60dc6d1dfbd27072c\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" podUID="50fccca4-2734-4839-83b0-d220b0dfa1d6" Nov 21 14:01:13 crc kubenswrapper[5133]: E1121 14:01:13.752409 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" podUID="28c76d9f-b15c-4b67-b616-88f89fea7eb7" Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.765316 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" event={"ID":"3561dc03-64fa-4b51-b14c-02ef7dc87280","Type":"ContainerStarted","Data":"1ed2e4a7cd3b973f5877a17f4cc5eb9af21f80d0ceb44298c5b25af2c52899cf"} Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.765470 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.784280 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" event={"ID":"abdbcac3-a654-4558-b71c-6b0a4d6c9c19","Type":"ContainerStarted","Data":"76c46ee9b8a3bde0531ab321fe76cf602cc5dd4d0e38d95cdbe39762acc8d32d"} Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.804545 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" podStartSLOduration=3.03480857 podStartE2EDuration="35.804516146s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.525313934 +0000 UTC m=+1100.323146182" lastFinishedPulling="2025-11-21 14:01:13.29502151 +0000 UTC m=+1133.092853758" observedRunningTime="2025-11-21 14:01:13.794354565 +0000 UTC m=+1133.592186813" watchObservedRunningTime="2025-11-21 14:01:13.804516146 +0000 UTC m=+1133.602348394" Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.817209 5133 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" event={"ID":"a076ae54-b994-453a-9361-2bf9acab8d2d","Type":"ContainerStarted","Data":"19401b4bbb74a66c8d4cbb564d239855e8b1f799dc1e2b9b8b2363a55a0a84f5"} Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.831105 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" event={"ID":"99e612e9-ba8d-41cd-9654-4332a4132c4f","Type":"ContainerStarted","Data":"b1bb6418d5f0af5819dceb1e78591cbd319fc9dbd8f0db17fe40f861d27edcc1"} Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.832042 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.845366 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" event={"ID":"d1eae28c-1654-4cfc-a380-c56e52bdd2d5","Type":"ContainerStarted","Data":"5228aa3ac88cafce28fcce329538a1bf05e6b4963fe236a967fe4c7fc49a2a93"} Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.852597 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" event={"ID":"6f45172d-e807-4bb7-b836-a2a6a53beccf","Type":"ContainerStarted","Data":"b6d4043108034f6b468b653698e7b764b9ed558946d69820ef178eb49f2c5827"} Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.853157 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.894326 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" podStartSLOduration=3.285344596 podStartE2EDuration="36.894292248s" podCreationTimestamp="2025-11-21 14:00:37 +0000 UTC" firstStartedPulling="2025-11-21 14:00:39.370907278 +0000 UTC m=+1099.168739526" lastFinishedPulling="2025-11-21 14:01:12.97985493 +0000 UTC m=+1132.777687178" observedRunningTime="2025-11-21 14:01:13.884813534 +0000 UTC m=+1133.682645782" watchObservedRunningTime="2025-11-21 14:01:13.894292248 +0000 UTC m=+1133.692124496" Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.902725 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" event={"ID":"f2e66c49-bdd0-408a-867b-6e4c869df2a7","Type":"ContainerStarted","Data":"71ee0f684e923eaa155d0c22f03bc497ea64ee70b0a33c80ab90af1fc543ed5b"} Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.902782 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" event={"ID":"f2e66c49-bdd0-408a-867b-6e4c869df2a7","Type":"ContainerStarted","Data":"b4a52428819cc48dbf8d0583d056a62278388d979e09606e16b2d49043d71d8d"} Nov 21 14:01:13 crc kubenswrapper[5133]: I1121 14:01:13.904031 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" Nov 21 14:01:14 crc kubenswrapper[5133]: I1121 14:01:14.021724 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" 
event={"ID":"156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8","Type":"ContainerStarted","Data":"fb1a6c6ffd2c5177e5a84c4a67a355e4dc92c5d9392dd739bb78b0db72288837"} Nov 21 14:01:14 crc kubenswrapper[5133]: I1121 14:01:14.022852 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" Nov 21 14:01:14 crc kubenswrapper[5133]: I1121 14:01:14.030618 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" podStartSLOduration=5.62331426 podStartE2EDuration="36.030587673s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.545838092 +0000 UTC m=+1100.343670340" lastFinishedPulling="2025-11-21 14:01:10.953111505 +0000 UTC m=+1130.750943753" observedRunningTime="2025-11-21 14:01:13.950957193 +0000 UTC m=+1133.748789441" watchObservedRunningTime="2025-11-21 14:01:14.030587673 +0000 UTC m=+1133.828419921" Nov 21 14:01:14 crc kubenswrapper[5133]: I1121 14:01:14.033711 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" podStartSLOduration=3.510036161 podStartE2EDuration="36.033699166s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.440974568 +0000 UTC m=+1100.238806816" lastFinishedPulling="2025-11-21 14:01:12.964637563 +0000 UTC m=+1132.762469821" observedRunningTime="2025-11-21 14:01:14.024539821 +0000 UTC m=+1133.822372079" watchObservedRunningTime="2025-11-21 14:01:14.033699166 +0000 UTC m=+1133.831531414" Nov 21 14:01:14 crc kubenswrapper[5133]: I1121 14:01:14.075281 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" event={"ID":"a7a7378a-ba96-4a74-9730-61c7e0215843","Type":"ContainerStarted","Data":"6ba8fc8321ca76d7577f2a69d3d5ea4ad25f0f8918ce204ba6947a714b0e6456"} Nov 21 14:01:14 crc kubenswrapper[5133]: I1121 14:01:14.078430 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" podStartSLOduration=3.319265184 podStartE2EDuration="37.078395842s" podCreationTimestamp="2025-11-21 14:00:37 +0000 UTC" firstStartedPulling="2025-11-21 14:00:39.553447811 +0000 UTC m=+1099.351280059" lastFinishedPulling="2025-11-21 14:01:13.312578469 +0000 UTC m=+1133.110410717" observedRunningTime="2025-11-21 14:01:14.071203789 +0000 UTC m=+1133.869036037" watchObservedRunningTime="2025-11-21 14:01:14.078395842 +0000 UTC m=+1133.876228090" Nov 21 14:01:14 crc kubenswrapper[5133]: I1121 14:01:14.088158 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" event={"ID":"2b5268ec-12ae-4aee-84f2-f176c3e8f1c3","Type":"ContainerStarted","Data":"33d8c8399f996f8a5f1afa1edff432a1b2f7c20071ce1e3452598e6bad611daa"} Nov 21 14:01:14 crc kubenswrapper[5133]: I1121 14:01:14.093938 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" event={"ID":"2036671e-8670-48c3-af60-5eee8087efa7","Type":"ContainerStarted","Data":"ff7ed4bb7032fc6ceb3b25e1c2d4aebce107d9c0401c55d5e1d148f9ef4cf24d"} Nov 21 14:01:14 crc kubenswrapper[5133]: E1121 14:01:14.095697 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" podUID="2036671e-8670-48c3-af60-5eee8087efa7" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.105366 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" event={"ID":"a076ae54-b994-453a-9361-2bf9acab8d2d","Type":"ContainerStarted","Data":"cfba07bc8073972802e5bc4291bef516ec8315c2b773fa7bae536766af864570"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.107016 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.109795 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" event={"ID":"abdbcac3-a654-4558-b71c-6b0a4d6c9c19","Type":"ContainerStarted","Data":"54991223876f6ce69b4a54f2e9ae8e4310af651b2633ea39b1f0075d6b0cb84f"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.110477 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.112641 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" event={"ID":"a7a7378a-ba96-4a74-9730-61c7e0215843","Type":"ContainerStarted","Data":"7fe2506e9173a9608ff7b89451543b8db5574c1f81b08d27be703cdede9ba7e3"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.113133 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.116056 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" event={"ID":"2b5268ec-12ae-4aee-84f2-f176c3e8f1c3","Type":"ContainerStarted","Data":"a3be1d84b5aa1d9024f35c23650010c61f7646c2d4eb741ff38fe87308f737f6"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.116224 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.118480 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" event={"ID":"71d8bd8e-380d-458b-93b8-7e1a68964294","Type":"ContainerStarted","Data":"df6482099f11520a3aad75ce8e6ee233c31d8943627d57c2b641ea45971c0718"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.118564 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.120819 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" event={"ID":"701735b8-41e5-47f9-9ac7-b8cfc0357597","Type":"ContainerStarted","Data":"cb6078ddf1240ad7ef1ee65d3e45a011621dc9a19a13957682d87a135453f658"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.121696 5133 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.124428 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" event={"ID":"167387d2-835f-4d70-8a59-de71037b8178","Type":"ContainerStarted","Data":"80f2b2bbf09334771c7456bd1f066b6aee38fb39bebe49bc391fbaba1c4156d4"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.125088 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.127217 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" event={"ID":"d1eae28c-1654-4cfc-a380-c56e52bdd2d5","Type":"ContainerStarted","Data":"3b99695983e18429b59418a25d809628c7ef4d7337c341e21f98f820136402c4"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.127623 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.129745 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" event={"ID":"641bce51-c8fb-4956-a13d-e42d7204b3d2","Type":"ContainerStarted","Data":"d1dbe56eeb72fa79bb07be2e78feb1b5ed274ddcc73916bfdf463f1bfc85e234"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.130236 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.132431 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" event={"ID":"2da1ddc9-a310-4a6a-90de-b63c8b33448a","Type":"ContainerStarted","Data":"c89f9b8dc67acef8ecb0565d04c28dc879543d7735949027154739582d8fdf35"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.132942 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.137934 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" podStartSLOduration=3.223365823 podStartE2EDuration="37.137904868s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.493717509 +0000 UTC m=+1100.291549757" lastFinishedPulling="2025-11-21 14:01:14.408256554 +0000 UTC m=+1134.206088802" observedRunningTime="2025-11-21 14:01:15.13426121 +0000 UTC m=+1134.932093458" watchObservedRunningTime="2025-11-21 14:01:15.137904868 +0000 UTC m=+1134.935737116" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.139433 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" event={"ID":"0b72b567-d244-4d7e-984b-ba42dfb7be25","Type":"ContainerStarted","Data":"659a8623012618ec38c0a8a573989226682e324a1ba37bc2fadcc80cec442591"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.139633 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.142534 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" event={"ID":"d8e037c0-d4af-4d74-9e8f-e749db78b81d","Type":"ContainerStarted","Data":"a52f008a66d0f104e9b95552fd614e1f60fa49ea32b613ef595b5ae887a95f38"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.142657 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.145364 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" event={"ID":"fbcf38a1-6730-49b5-bf19-9966465f2d1b","Type":"ContainerStarted","Data":"29a2541b684f721ed64bba6b52c18a8d6705b1a60201aa267918f520187a9d8a"} Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.145521 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.148737 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" event={"ID":"28c76d9f-b15c-4b67-b616-88f89fea7eb7","Type":"ContainerStarted","Data":"3f768fba0e707220f392637cf54e67663a4f88434f13a984d748f5e93703c7a1"} Nov 21 14:01:15 crc kubenswrapper[5133]: E1121 14:01:15.150810 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:3ef72bbd7cce89ff54d850ff44ca6d7b2360834a502da3d561aeb6fd3d9af50a\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" podUID="28c76d9f-b15c-4b67-b616-88f89fea7eb7" Nov 21 14:01:15 crc kubenswrapper[5133]: E1121 14:01:15.150827 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.2:5001/openstack-k8s-operators/cinder-operator:5c079ca6292ab98f866ec1d60dc6d1dfbd27072c\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" podUID="50fccca4-2734-4839-83b0-d220b0dfa1d6" Nov 21 14:01:15 crc kubenswrapper[5133]: E1121 14:01:15.150939 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" podUID="2036671e-8670-48c3-af60-5eee8087efa7" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.165321 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" podStartSLOduration=3.260778022 podStartE2EDuration="37.16529591s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:39.871140957 +0000 UTC m=+1099.668973205" lastFinishedPulling="2025-11-21 14:01:13.775658845 +0000 UTC m=+1133.573491093" observedRunningTime="2025-11-21 14:01:15.162445424 +0000 UTC m=+1134.960277672" 
watchObservedRunningTime="2025-11-21 14:01:15.16529591 +0000 UTC m=+1134.963128168" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.181880 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" podStartSLOduration=3.13095008 podStartE2EDuration="37.181857633s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.493671287 +0000 UTC m=+1100.291503535" lastFinishedPulling="2025-11-21 14:01:14.54457885 +0000 UTC m=+1134.342411088" observedRunningTime="2025-11-21 14:01:15.178073032 +0000 UTC m=+1134.975905290" watchObservedRunningTime="2025-11-21 14:01:15.181857633 +0000 UTC m=+1134.979689881" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.205552 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" podStartSLOduration=4.11461138 podStartE2EDuration="37.205520886s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.493344479 +0000 UTC m=+1100.291176727" lastFinishedPulling="2025-11-21 14:01:13.584253975 +0000 UTC m=+1133.382086233" observedRunningTime="2025-11-21 14:01:15.192884848 +0000 UTC m=+1134.990717116" watchObservedRunningTime="2025-11-21 14:01:15.205520886 +0000 UTC m=+1135.003353134" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.231191 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" podStartSLOduration=4.338624871 podStartE2EDuration="37.231162782s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.584315662 +0000 UTC m=+1100.382147910" lastFinishedPulling="2025-11-21 14:01:13.476853573 +0000 UTC m=+1133.274685821" observedRunningTime="2025-11-21 14:01:15.225362947 +0000 UTC m=+1135.023195205" watchObservedRunningTime="2025-11-21 14:01:15.231162782 +0000 UTC m=+1135.028995030" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.248729 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" podStartSLOduration=3.934304307 podStartE2EDuration="37.248698921s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.561647355 +0000 UTC m=+1100.359479603" lastFinishedPulling="2025-11-21 14:01:13.876041959 +0000 UTC m=+1133.673874217" observedRunningTime="2025-11-21 14:01:15.247253942 +0000 UTC m=+1135.045086200" watchObservedRunningTime="2025-11-21 14:01:15.248698921 +0000 UTC m=+1135.046531169" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.273294 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" podStartSLOduration=4.480908606 podStartE2EDuration="37.273245497s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.561891302 +0000 UTC m=+1100.359723550" lastFinishedPulling="2025-11-21 14:01:13.354228193 +0000 UTC m=+1133.152060441" observedRunningTime="2025-11-21 14:01:15.265291175 +0000 UTC m=+1135.063123423" watchObservedRunningTime="2025-11-21 14:01:15.273245497 +0000 UTC m=+1135.071077745" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.320244 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" podStartSLOduration=4.491583466 podStartE2EDuration="38.320215564s" podCreationTimestamp="2025-11-21 14:00:37 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.458511037 +0000 UTC m=+1100.256343285" lastFinishedPulling="2025-11-21 14:01:14.287143135 +0000 UTC m=+1134.084975383" observedRunningTime="2025-11-21 14:01:15.297351652 +0000 UTC m=+1135.095183900" watchObservedRunningTime="2025-11-21 14:01:15.320215564 +0000 UTC m=+1135.118047812" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.351697 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" podStartSLOduration=3.435087435 podStartE2EDuration="37.351675795s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.493235926 +0000 UTC m=+1100.291068174" lastFinishedPulling="2025-11-21 14:01:14.409824286 +0000 UTC m=+1134.207656534" observedRunningTime="2025-11-21 14:01:15.323208624 +0000 UTC m=+1135.121040872" watchObservedRunningTime="2025-11-21 14:01:15.351675795 +0000 UTC m=+1135.149508043" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.373250 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" podStartSLOduration=3.241407104 podStartE2EDuration="37.373229701s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.493340098 +0000 UTC m=+1100.291172346" lastFinishedPulling="2025-11-21 14:01:14.625162695 +0000 UTC m=+1134.422994943" observedRunningTime="2025-11-21 14:01:15.349824185 +0000 UTC m=+1135.147656433" watchObservedRunningTime="2025-11-21 14:01:15.373229701 +0000 UTC m=+1135.171061949" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.438537 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" podStartSLOduration=4.369661066 podStartE2EDuration="38.438514098s" podCreationTimestamp="2025-11-21 14:00:37 +0000 UTC" firstStartedPulling="2025-11-21 14:00:39.915083033 +0000 UTC m=+1099.712915281" lastFinishedPulling="2025-11-21 14:01:13.983936065 +0000 UTC m=+1133.781768313" observedRunningTime="2025-11-21 14:01:15.435378764 +0000 UTC m=+1135.233211012" watchObservedRunningTime="2025-11-21 14:01:15.438514098 +0000 UTC m=+1135.236346346" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.504714 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" podStartSLOduration=4.925699902 podStartE2EDuration="37.504684207s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:41.063320363 +0000 UTC m=+1100.861152611" lastFinishedPulling="2025-11-21 14:01:13.642304668 +0000 UTC m=+1133.440136916" observedRunningTime="2025-11-21 14:01:15.500768663 +0000 UTC m=+1135.298600911" watchObservedRunningTime="2025-11-21 14:01:15.504684207 +0000 UTC m=+1135.302516455" Nov 21 14:01:15 crc kubenswrapper[5133]: I1121 14:01:15.525851 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" podStartSLOduration=4.4741821569999995 podStartE2EDuration="37.525822813s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.576240876 
+0000 UTC m=+1100.374073124" lastFinishedPulling="2025-11-21 14:01:13.627881522 +0000 UTC m=+1133.425713780" observedRunningTime="2025-11-21 14:01:15.522259957 +0000 UTC m=+1135.320092225" watchObservedRunningTime="2025-11-21 14:01:15.525822813 +0000 UTC m=+1135.323655071" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.162200 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-4h6lz" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.163807 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-448nh" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.165355 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-znxpd" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.165429 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-l7bft" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.165454 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-vpq2d" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.165598 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-tzn9t" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.165649 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-772lb" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.165862 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-q9q7l" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.166247 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-mj6sx" Nov 21 14:01:16 crc kubenswrapper[5133]: I1121 14:01:16.171596 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-7g4lm" Nov 21 14:01:17 crc kubenswrapper[5133]: I1121 14:01:17.170563 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" event={"ID":"28c76d9f-b15c-4b67-b616-88f89fea7eb7","Type":"ContainerStarted","Data":"89252a4a4758d5dcf7dfaa5ade7dac3e1cb7356cdd5fd18bf0552c2df58450fd"} Nov 21 14:01:17 crc kubenswrapper[5133]: I1121 14:01:17.177586 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-vt2kw" Nov 21 14:01:17 crc kubenswrapper[5133]: I1121 14:01:17.193674 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" podStartSLOduration=2.90245649 podStartE2EDuration="39.193640919s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.53265513 +0000 UTC m=+1100.330487378" lastFinishedPulling="2025-11-21 14:01:16.823839549 +0000 UTC m=+1136.621671807" observedRunningTime="2025-11-21 14:01:17.193203867 +0000 UTC 
m=+1136.991036185" watchObservedRunningTime="2025-11-21 14:01:17.193640919 +0000 UTC m=+1136.991473197" Nov 21 14:01:18 crc kubenswrapper[5133]: I1121 14:01:18.775679 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" Nov 21 14:01:19 crc kubenswrapper[5133]: I1121 14:01:19.150297 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-kkk8d" Nov 21 14:01:19 crc kubenswrapper[5133]: I1121 14:01:19.372563 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-g2fdl" Nov 21 14:01:21 crc kubenswrapper[5133]: I1121 14:01:21.207765 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" event={"ID":"167b5c97-691f-4078-ac5f-e849a162b136","Type":"ContainerStarted","Data":"7891fd974c66601a14d8efa6cdb66a9a658dca46be36d917a1648927aa521b84"} Nov 21 14:01:21 crc kubenswrapper[5133]: I1121 14:01:21.234967 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-lfntj" podStartSLOduration=2.742748615 podStartE2EDuration="42.234930436s" podCreationTimestamp="2025-11-21 14:00:39 +0000 UTC" firstStartedPulling="2025-11-21 14:00:40.486519776 +0000 UTC m=+1100.284352024" lastFinishedPulling="2025-11-21 14:01:19.978701557 +0000 UTC m=+1139.776533845" observedRunningTime="2025-11-21 14:01:21.226881221 +0000 UTC m=+1141.024713479" watchObservedRunningTime="2025-11-21 14:01:21.234930436 +0000 UTC m=+1141.032762694" Nov 21 14:01:27 crc kubenswrapper[5133]: I1121 14:01:27.276279 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" event={"ID":"50fccca4-2734-4839-83b0-d220b0dfa1d6","Type":"ContainerStarted","Data":"0cfb118cfcccbae60198acebb54bdec7513ed5c2d5685cf4efff986b6450c5bd"} Nov 21 14:01:27 crc kubenswrapper[5133]: I1121 14:01:27.277371 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" Nov 21 14:01:27 crc kubenswrapper[5133]: I1121 14:01:27.278431 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" event={"ID":"2036671e-8670-48c3-af60-5eee8087efa7","Type":"ContainerStarted","Data":"0d00a1684bf42637c42c0c9a71db2aae208083a100af49255aa14745921cd5d3"} Nov 21 14:01:27 crc kubenswrapper[5133]: I1121 14:01:27.278667 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:01:27 crc kubenswrapper[5133]: I1121 14:01:27.306288 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" podStartSLOduration=3.6055155880000003 podStartE2EDuration="50.306254498s" podCreationTimestamp="2025-11-21 14:00:37 +0000 UTC" firstStartedPulling="2025-11-21 14:00:39.914663761 +0000 UTC m=+1099.712496009" lastFinishedPulling="2025-11-21 14:01:26.615402671 +0000 UTC m=+1146.413234919" observedRunningTime="2025-11-21 14:01:27.297517824 +0000 UTC m=+1147.095350092" watchObservedRunningTime="2025-11-21 14:01:27.306254498 +0000 UTC 
m=+1147.104086766" Nov 21 14:01:27 crc kubenswrapper[5133]: I1121 14:01:27.335020 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" podStartSLOduration=5.49958037 podStartE2EDuration="49.334969466s" podCreationTimestamp="2025-11-21 14:00:38 +0000 UTC" firstStartedPulling="2025-11-21 14:00:43.037160563 +0000 UTC m=+1102.834992811" lastFinishedPulling="2025-11-21 14:01:26.872549669 +0000 UTC m=+1146.670381907" observedRunningTime="2025-11-21 14:01:27.328335529 +0000 UTC m=+1147.126167787" watchObservedRunningTime="2025-11-21 14:01:27.334969466 +0000 UTC m=+1147.132801724" Nov 21 14:01:28 crc kubenswrapper[5133]: I1121 14:01:28.361705 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-g6kng" Nov 21 14:01:28 crc kubenswrapper[5133]: I1121 14:01:28.714025 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-glgx8" Nov 21 14:01:28 crc kubenswrapper[5133]: I1121 14:01:28.792460 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-gqfpz" Nov 21 14:01:28 crc kubenswrapper[5133]: I1121 14:01:28.861736 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-mwsrn" Nov 21 14:01:28 crc kubenswrapper[5133]: I1121 14:01:28.963799 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-mxphc" Nov 21 14:01:28 crc kubenswrapper[5133]: I1121 14:01:28.964331 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-vpjk7" Nov 21 14:01:32 crc kubenswrapper[5133]: I1121 14:01:32.634461 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-544b9bb9-5z85m" Nov 21 14:01:38 crc kubenswrapper[5133]: I1121 14:01:38.301289 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-5c575964b7-fcp4p" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.746132 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-82rwv"] Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.748571 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.754162 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.754241 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-mzsxp" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.754474 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.754518 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.755597 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-82rwv"] Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.819289 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rdgf8"] Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.821504 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.823781 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.833158 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a14f3d4-e942-46bd-9f9a-af43b1856214-config\") pod \"dnsmasq-dns-675f4bcbfc-82rwv\" (UID: \"6a14f3d4-e942-46bd-9f9a-af43b1856214\") " pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.833249 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pfgv\" (UniqueName: \"kubernetes.io/projected/6a14f3d4-e942-46bd-9f9a-af43b1856214-kube-api-access-6pfgv\") pod \"dnsmasq-dns-675f4bcbfc-82rwv\" (UID: \"6a14f3d4-e942-46bd-9f9a-af43b1856214\") " pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.833267 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rdgf8"] Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.934670 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a14f3d4-e942-46bd-9f9a-af43b1856214-config\") pod \"dnsmasq-dns-675f4bcbfc-82rwv\" (UID: \"6a14f3d4-e942-46bd-9f9a-af43b1856214\") " pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.935112 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvgxq\" (UniqueName: \"kubernetes.io/projected/4097bd51-e085-4017-9ec5-cfbb86d1cc08-kube-api-access-lvgxq\") pod \"dnsmasq-dns-78dd6ddcc-rdgf8\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.935299 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pfgv\" (UniqueName: \"kubernetes.io/projected/6a14f3d4-e942-46bd-9f9a-af43b1856214-kube-api-access-6pfgv\") pod \"dnsmasq-dns-675f4bcbfc-82rwv\" (UID: \"6a14f3d4-e942-46bd-9f9a-af43b1856214\") " 
pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.935430 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-rdgf8\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.935576 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-config\") pod \"dnsmasq-dns-78dd6ddcc-rdgf8\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.935880 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a14f3d4-e942-46bd-9f9a-af43b1856214-config\") pod \"dnsmasq-dns-675f4bcbfc-82rwv\" (UID: \"6a14f3d4-e942-46bd-9f9a-af43b1856214\") " pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:01:56 crc kubenswrapper[5133]: I1121 14:01:56.959239 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pfgv\" (UniqueName: \"kubernetes.io/projected/6a14f3d4-e942-46bd-9f9a-af43b1856214-kube-api-access-6pfgv\") pod \"dnsmasq-dns-675f4bcbfc-82rwv\" (UID: \"6a14f3d4-e942-46bd-9f9a-af43b1856214\") " pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.036816 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvgxq\" (UniqueName: \"kubernetes.io/projected/4097bd51-e085-4017-9ec5-cfbb86d1cc08-kube-api-access-lvgxq\") pod \"dnsmasq-dns-78dd6ddcc-rdgf8\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.036933 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-rdgf8\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.036983 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-config\") pod \"dnsmasq-dns-78dd6ddcc-rdgf8\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.038329 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-config\") pod \"dnsmasq-dns-78dd6ddcc-rdgf8\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.038389 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-rdgf8\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.054353 5133 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvgxq\" (UniqueName: \"kubernetes.io/projected/4097bd51-e085-4017-9ec5-cfbb86d1cc08-kube-api-access-lvgxq\") pod \"dnsmasq-dns-78dd6ddcc-rdgf8\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.083257 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.140366 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.548169 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-82rwv"] Nov 21 14:01:57 crc kubenswrapper[5133]: I1121 14:01:57.618284 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rdgf8"] Nov 21 14:01:57 crc kubenswrapper[5133]: W1121 14:01:57.623319 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4097bd51_e085_4017_9ec5_cfbb86d1cc08.slice/crio-bf4d0338aa894406f66627466c1f1fdbcff51a22f424ea690cb88987a7f9409c WatchSource:0}: Error finding container bf4d0338aa894406f66627466c1f1fdbcff51a22f424ea690cb88987a7f9409c: Status 404 returned error can't find the container with id bf4d0338aa894406f66627466c1f1fdbcff51a22f424ea690cb88987a7f9409c Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.576952 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" event={"ID":"4097bd51-e085-4017-9ec5-cfbb86d1cc08","Type":"ContainerStarted","Data":"bf4d0338aa894406f66627466c1f1fdbcff51a22f424ea690cb88987a7f9409c"} Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.586401 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" event={"ID":"6a14f3d4-e942-46bd-9f9a-af43b1856214","Type":"ContainerStarted","Data":"cdb28e5f9dc955a553ec7fd2eb87aa9de60287435b56f6287efce6f7d42a2e2d"} Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.643564 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-82rwv"] Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.662595 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-k5kck"] Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.668673 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.699191 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-k5kck"] Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.869357 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-config\") pod \"dnsmasq-dns-666b6646f7-k5kck\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.869418 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-dns-svc\") pod \"dnsmasq-dns-666b6646f7-k5kck\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.869541 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bggrg\" (UniqueName: \"kubernetes.io/projected/86efb203-682b-4933-acc2-14cf239d4770-kube-api-access-bggrg\") pod \"dnsmasq-dns-666b6646f7-k5kck\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.973583 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bggrg\" (UniqueName: \"kubernetes.io/projected/86efb203-682b-4933-acc2-14cf239d4770-kube-api-access-bggrg\") pod \"dnsmasq-dns-666b6646f7-k5kck\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.973701 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-config\") pod \"dnsmasq-dns-666b6646f7-k5kck\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.973725 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-dns-svc\") pod \"dnsmasq-dns-666b6646f7-k5kck\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.974960 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-dns-svc\") pod \"dnsmasq-dns-666b6646f7-k5kck\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:58 crc kubenswrapper[5133]: I1121 14:01:58.976045 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-config\") pod \"dnsmasq-dns-666b6646f7-k5kck\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.033754 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rdgf8"] Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.037149 
5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bggrg\" (UniqueName: \"kubernetes.io/projected/86efb203-682b-4933-acc2-14cf239d4770-kube-api-access-bggrg\") pod \"dnsmasq-dns-666b6646f7-k5kck\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.068143 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drqnz"] Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.096891 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drqnz"] Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.097050 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.282170 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-config\") pod \"dnsmasq-dns-57d769cc4f-drqnz\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.282686 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-drqnz\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.282718 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7q2s\" (UniqueName: \"kubernetes.io/projected/50c34142-ef58-4e11-acce-613c693cc483-kube-api-access-t7q2s\") pod \"dnsmasq-dns-57d769cc4f-drqnz\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.299965 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.386586 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7q2s\" (UniqueName: \"kubernetes.io/projected/50c34142-ef58-4e11-acce-613c693cc483-kube-api-access-t7q2s\") pod \"dnsmasq-dns-57d769cc4f-drqnz\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.386693 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-config\") pod \"dnsmasq-dns-57d769cc4f-drqnz\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.386742 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-drqnz\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.387921 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-drqnz\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.388404 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-config\") pod \"dnsmasq-dns-57d769cc4f-drqnz\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.413263 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7q2s\" (UniqueName: \"kubernetes.io/projected/50c34142-ef58-4e11-acce-613c693cc483-kube-api-access-t7q2s\") pod \"dnsmasq-dns-57d769cc4f-drqnz\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.437547 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.837194 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.843245 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.849199 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.849700 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.850302 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.850649 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.850695 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.850838 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-knfp8" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.853065 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.855658 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.892902 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-k5kck"] Nov 21 14:01:59 crc kubenswrapper[5133]: W1121 14:01:59.905535 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86efb203_682b_4933_acc2_14cf239d4770.slice/crio-494dbcf89e7ed85ff68e9931b6f79e7fddb8130e1fe87af84a074f36a6479c08 WatchSource:0}: Error finding container 494dbcf89e7ed85ff68e9931b6f79e7fddb8130e1fe87af84a074f36a6479c08: Status 404 returned error can't find the container with id 494dbcf89e7ed85ff68e9931b6f79e7fddb8130e1fe87af84a074f36a6479c08 Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.998801 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9aa1caed-f687-4526-a851-59b4d192b705-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.998863 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.998895 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-config-data\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.998921 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-server-conf\") pod \"rabbitmq-server-0\" (UID: 
\"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.998941 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.998965 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.998986 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22d9n\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-kube-api-access-22d9n\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.999023 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9aa1caed-f687-4526-a851-59b4d192b705-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.999041 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.999089 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:01:59 crc kubenswrapper[5133]: I1121 14:01:59.999120 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.023870 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drqnz"] Nov 21 14:02:00 crc kubenswrapper[5133]: W1121 14:02:00.052307 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50c34142_ef58_4e11_acce_613c693cc483.slice/crio-0b5e38935b8567077df4bfa162a1bc8239cd3a969ba65d96045d76a83f672d42 WatchSource:0}: Error finding container 0b5e38935b8567077df4bfa162a1bc8239cd3a969ba65d96045d76a83f672d42: Status 404 returned error can't find the container with id 0b5e38935b8567077df4bfa162a1bc8239cd3a969ba65d96045d76a83f672d42 Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101083 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101150 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101205 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9aa1caed-f687-4526-a851-59b4d192b705-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101239 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101262 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-config-data\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101283 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101301 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101320 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101340 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22d9n\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-kube-api-access-22d9n\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101356 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9aa1caed-f687-4526-a851-59b4d192b705-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: 
\"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.101371 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.102164 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.102229 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.105350 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.105880 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.106929 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.108692 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-config-data\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.108796 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.109601 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.116969 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/9aa1caed-f687-4526-a851-59b4d192b705-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.118521 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9aa1caed-f687-4526-a851-59b4d192b705-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.129359 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.130651 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22d9n\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-kube-api-access-22d9n\") pod \"rabbitmq-server-0\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.171785 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.191405 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.192848 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.192945 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.203397 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.203545 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.203654 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.203711 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.203759 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.203887 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.204635 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-jqhmq" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.315500 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316194 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316249 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316290 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2c9873f2-025e-499f-8a76-47c38495fd75-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316328 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316353 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-config-data\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316372 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316390 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316428 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2c9873f2-025e-499f-8a76-47c38495fd75-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316444 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4scz\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-kube-api-access-f4scz\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.316473 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419504 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2c9873f2-025e-499f-8a76-47c38495fd75-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419553 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4scz\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-kube-api-access-f4scz\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419597 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419639 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419677 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419727 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419749 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2c9873f2-025e-499f-8a76-47c38495fd75-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419799 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419830 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419850 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.419872 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.422522 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.424338 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.425029 5133 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.426061 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.426551 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.428201 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.428414 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.429975 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.430869 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2c9873f2-025e-499f-8a76-47c38495fd75-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.431933 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2c9873f2-025e-499f-8a76-47c38495fd75-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.494681 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.517268 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4scz\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-kube-api-access-f4scz\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.539409 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.623966 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" event={"ID":"50c34142-ef58-4e11-acce-613c693cc483","Type":"ContainerStarted","Data":"0b5e38935b8567077df4bfa162a1bc8239cd3a969ba65d96045d76a83f672d42"} Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.639832 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-k5kck" event={"ID":"86efb203-682b-4933-acc2-14cf239d4770","Type":"ContainerStarted","Data":"494dbcf89e7ed85ff68e9931b6f79e7fddb8130e1fe87af84a074f36a6479c08"} Nov 21 14:02:00 crc kubenswrapper[5133]: I1121 14:02:00.862754 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 14:02:00 crc kubenswrapper[5133]: W1121 14:02:00.916565 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9aa1caed_f687_4526_a851_59b4d192b705.slice/crio-a7fc3f8b7d21155945191d7e900e3bff6b85752089c4e0fda63c12885d5423cf WatchSource:0}: Error finding container a7fc3f8b7d21155945191d7e900e3bff6b85752089c4e0fda63c12885d5423cf: Status 404 returned error can't find the container with id a7fc3f8b7d21155945191d7e900e3bff6b85752089c4e0fda63c12885d5423cf Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.188607 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 14:02:01 crc kubenswrapper[5133]: W1121 14:02:01.204436 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c9873f2_025e_499f_8a76_47c38495fd75.slice/crio-46043a57ce59dfbde180cf1937fded57281ada12bb3a56e8c9930a013963a006 WatchSource:0}: Error finding container 46043a57ce59dfbde180cf1937fded57281ada12bb3a56e8c9930a013963a006: Status 404 returned error can't find the container with id 46043a57ce59dfbde180cf1937fded57281ada12bb3a56e8c9930a013963a006 Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.533592 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.536105 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.543088 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.543400 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-h49g2" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.543693 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.546652 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.550500 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.573510 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.580422 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3afd96f5-effd-43e8-8986-b9fc1fd28233-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.580510 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3afd96f5-effd-43e8-8986-b9fc1fd28233-config-data-generated\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.580563 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3afd96f5-effd-43e8-8986-b9fc1fd28233-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.580681 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.580706 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3afd96f5-effd-43e8-8986-b9fc1fd28233-kolla-config\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.580761 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3afd96f5-effd-43e8-8986-b9fc1fd28233-operator-scripts\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.580819 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-9sx5q\" (UniqueName: \"kubernetes.io/projected/3afd96f5-effd-43e8-8986-b9fc1fd28233-kube-api-access-9sx5q\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.580862 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3afd96f5-effd-43e8-8986-b9fc1fd28233-config-data-default\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.665750 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9aa1caed-f687-4526-a851-59b4d192b705","Type":"ContainerStarted","Data":"a7fc3f8b7d21155945191d7e900e3bff6b85752089c4e0fda63c12885d5423cf"} Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.671194 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2c9873f2-025e-499f-8a76-47c38495fd75","Type":"ContainerStarted","Data":"46043a57ce59dfbde180cf1937fded57281ada12bb3a56e8c9930a013963a006"} Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.685804 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9sx5q\" (UniqueName: \"kubernetes.io/projected/3afd96f5-effd-43e8-8986-b9fc1fd28233-kube-api-access-9sx5q\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.685868 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3afd96f5-effd-43e8-8986-b9fc1fd28233-config-data-default\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.685919 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3afd96f5-effd-43e8-8986-b9fc1fd28233-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.685953 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3afd96f5-effd-43e8-8986-b9fc1fd28233-config-data-generated\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.685980 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3afd96f5-effd-43e8-8986-b9fc1fd28233-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.686065 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.686091 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3afd96f5-effd-43e8-8986-b9fc1fd28233-kolla-config\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.686124 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3afd96f5-effd-43e8-8986-b9fc1fd28233-operator-scripts\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.686890 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3afd96f5-effd-43e8-8986-b9fc1fd28233-config-data-generated\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.687892 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3afd96f5-effd-43e8-8986-b9fc1fd28233-config-data-default\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.688088 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3afd96f5-effd-43e8-8986-b9fc1fd28233-operator-scripts\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.689342 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.690822 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3afd96f5-effd-43e8-8986-b9fc1fd28233-kolla-config\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.706289 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9sx5q\" (UniqueName: \"kubernetes.io/projected/3afd96f5-effd-43e8-8986-b9fc1fd28233-kube-api-access-9sx5q\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.706461 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3afd96f5-effd-43e8-8986-b9fc1fd28233-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.707053 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3afd96f5-effd-43e8-8986-b9fc1fd28233-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: 
\"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.711263 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"3afd96f5-effd-43e8-8986-b9fc1fd28233\") " pod="openstack/openstack-galera-0" Nov 21 14:02:01 crc kubenswrapper[5133]: I1121 14:02:01.875519 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 21 14:02:02 crc kubenswrapper[5133]: I1121 14:02:02.402195 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 21 14:02:02 crc kubenswrapper[5133]: W1121 14:02:02.420928 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3afd96f5_effd_43e8_8986_b9fc1fd28233.slice/crio-3f6c6c416effa847b66d2237740dac8b2cc6e4a8c03758f8a31aa533584a7e6b WatchSource:0}: Error finding container 3f6c6c416effa847b66d2237740dac8b2cc6e4a8c03758f8a31aa533584a7e6b: Status 404 returned error can't find the container with id 3f6c6c416effa847b66d2237740dac8b2cc6e4a8c03758f8a31aa533584a7e6b Nov 21 14:02:02 crc kubenswrapper[5133]: I1121 14:02:02.763809 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"3afd96f5-effd-43e8-8986-b9fc1fd28233","Type":"ContainerStarted","Data":"3f6c6c416effa847b66d2237740dac8b2cc6e4a8c03758f8a31aa533584a7e6b"} Nov 21 14:02:02 crc kubenswrapper[5133]: I1121 14:02:02.882156 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 21 14:02:02 crc kubenswrapper[5133]: I1121 14:02:02.887086 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:02 crc kubenswrapper[5133]: I1121 14:02:02.890301 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 21 14:02:02 crc kubenswrapper[5133]: I1121 14:02:02.890888 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 21 14:02:02 crc kubenswrapper[5133]: I1121 14:02:02.891076 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-857g5" Nov 21 14:02:02 crc kubenswrapper[5133]: I1121 14:02:02.891227 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 21 14:02:02 crc kubenswrapper[5133]: I1121 14:02:02.894116 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.020981 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wk55\" (UniqueName: \"kubernetes.io/projected/65613fe9-4492-410c-9be2-2d01dcb1e085-kube-api-access-8wk55\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.021160 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/65613fe9-4492-410c-9be2-2d01dcb1e085-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.021204 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/65613fe9-4492-410c-9be2-2d01dcb1e085-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.022094 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/65613fe9-4492-410c-9be2-2d01dcb1e085-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.022133 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.022175 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65613fe9-4492-410c-9be2-2d01dcb1e085-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.022208 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/65613fe9-4492-410c-9be2-2d01dcb1e085-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.022231 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65613fe9-4492-410c-9be2-2d01dcb1e085-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.124409 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wk55\" (UniqueName: \"kubernetes.io/projected/65613fe9-4492-410c-9be2-2d01dcb1e085-kube-api-access-8wk55\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.124539 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/65613fe9-4492-410c-9be2-2d01dcb1e085-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.124568 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/65613fe9-4492-410c-9be2-2d01dcb1e085-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.124594 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/65613fe9-4492-410c-9be2-2d01dcb1e085-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.124624 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.124680 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65613fe9-4492-410c-9be2-2d01dcb1e085-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.124719 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/65613fe9-4492-410c-9be2-2d01dcb1e085-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.124737 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/65613fe9-4492-410c-9be2-2d01dcb1e085-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.125072 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/65613fe9-4492-410c-9be2-2d01dcb1e085-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.125309 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.125802 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/65613fe9-4492-410c-9be2-2d01dcb1e085-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.129777 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/65613fe9-4492-410c-9be2-2d01dcb1e085-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.129881 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65613fe9-4492-410c-9be2-2d01dcb1e085-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.135292 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/65613fe9-4492-410c-9be2-2d01dcb1e085-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.150190 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wk55\" (UniqueName: \"kubernetes.io/projected/65613fe9-4492-410c-9be2-2d01dcb1e085-kube-api-access-8wk55\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.150636 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65613fe9-4492-410c-9be2-2d01dcb1e085-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.253406 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: 
\"65613fe9-4492-410c-9be2-2d01dcb1e085\") " pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.269919 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.273018 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.278339 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.279871 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.280856 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-69m5b" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.288863 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.338395 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/507698df-bffb-4ed6-bbad-7e62bb1875f7-kolla-config\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.338680 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/507698df-bffb-4ed6-bbad-7e62bb1875f7-combined-ca-bundle\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.338765 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/507698df-bffb-4ed6-bbad-7e62bb1875f7-config-data\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.338787 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vgm4\" (UniqueName: \"kubernetes.io/projected/507698df-bffb-4ed6-bbad-7e62bb1875f7-kube-api-access-2vgm4\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.338811 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/507698df-bffb-4ed6-bbad-7e62bb1875f7-memcached-tls-certs\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.445248 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/507698df-bffb-4ed6-bbad-7e62bb1875f7-config-data\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.445313 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vgm4\" (UniqueName: 
\"kubernetes.io/projected/507698df-bffb-4ed6-bbad-7e62bb1875f7-kube-api-access-2vgm4\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.445334 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/507698df-bffb-4ed6-bbad-7e62bb1875f7-memcached-tls-certs\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.445373 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/507698df-bffb-4ed6-bbad-7e62bb1875f7-kolla-config\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.445414 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/507698df-bffb-4ed6-bbad-7e62bb1875f7-combined-ca-bundle\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.449062 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/507698df-bffb-4ed6-bbad-7e62bb1875f7-config-data\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.450662 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/507698df-bffb-4ed6-bbad-7e62bb1875f7-kolla-config\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.455823 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/507698df-bffb-4ed6-bbad-7e62bb1875f7-combined-ca-bundle\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.456445 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/507698df-bffb-4ed6-bbad-7e62bb1875f7-memcached-tls-certs\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.472040 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vgm4\" (UniqueName: \"kubernetes.io/projected/507698df-bffb-4ed6-bbad-7e62bb1875f7-kube-api-access-2vgm4\") pod \"memcached-0\" (UID: \"507698df-bffb-4ed6-bbad-7e62bb1875f7\") " pod="openstack/memcached-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.521232 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:03 crc kubenswrapper[5133]: I1121 14:02:03.639344 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 21 14:02:04 crc kubenswrapper[5133]: I1121 14:02:04.154828 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 21 14:02:04 crc kubenswrapper[5133]: W1121 14:02:04.190372 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65613fe9_4492_410c_9be2_2d01dcb1e085.slice/crio-56f1e2bb93008e8f99e0f2d0fa5b06c5ef151dc635957ccea2f80f7804901203 WatchSource:0}: Error finding container 56f1e2bb93008e8f99e0f2d0fa5b06c5ef151dc635957ccea2f80f7804901203: Status 404 returned error can't find the container with id 56f1e2bb93008e8f99e0f2d0fa5b06c5ef151dc635957ccea2f80f7804901203 Nov 21 14:02:04 crc kubenswrapper[5133]: I1121 14:02:04.518232 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 21 14:02:04 crc kubenswrapper[5133]: I1121 14:02:04.818502 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"65613fe9-4492-410c-9be2-2d01dcb1e085","Type":"ContainerStarted","Data":"56f1e2bb93008e8f99e0f2d0fa5b06c5ef151dc635957ccea2f80f7804901203"} Nov 21 14:02:05 crc kubenswrapper[5133]: I1121 14:02:05.044190 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 14:02:05 crc kubenswrapper[5133]: I1121 14:02:05.045709 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 14:02:05 crc kubenswrapper[5133]: I1121 14:02:05.053172 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 14:02:05 crc kubenswrapper[5133]: I1121 14:02:05.057444 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-8t9vs" Nov 21 14:02:05 crc kubenswrapper[5133]: I1121 14:02:05.089290 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kdxk\" (UniqueName: \"kubernetes.io/projected/866a2674-f006-4883-9598-6902879561e6-kube-api-access-2kdxk\") pod \"kube-state-metrics-0\" (UID: \"866a2674-f006-4883-9598-6902879561e6\") " pod="openstack/kube-state-metrics-0" Nov 21 14:02:05 crc kubenswrapper[5133]: I1121 14:02:05.193326 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kdxk\" (UniqueName: \"kubernetes.io/projected/866a2674-f006-4883-9598-6902879561e6-kube-api-access-2kdxk\") pod \"kube-state-metrics-0\" (UID: \"866a2674-f006-4883-9598-6902879561e6\") " pod="openstack/kube-state-metrics-0" Nov 21 14:02:05 crc kubenswrapper[5133]: I1121 14:02:05.216493 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kdxk\" (UniqueName: \"kubernetes.io/projected/866a2674-f006-4883-9598-6902879561e6-kube-api-access-2kdxk\") pod \"kube-state-metrics-0\" (UID: \"866a2674-f006-4883-9598-6902879561e6\") " pod="openstack/kube-state-metrics-0" Nov 21 14:02:05 crc kubenswrapper[5133]: I1121 14:02:05.389116 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.594284 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-2ckfn"] Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.596767 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.599539 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.600139 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-vvvdz" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.600727 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.603721 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-hbfsj"] Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.605687 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.612380 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2ckfn"] Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.618547 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hbfsj"] Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.674657 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6afb5f16-3806-4fbf-becf-8bf66576286f-combined-ca-bundle\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.674713 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vzkv\" (UniqueName: \"kubernetes.io/projected/6afb5f16-3806-4fbf-becf-8bf66576286f-kube-api-access-4vzkv\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.674902 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6afb5f16-3806-4fbf-becf-8bf66576286f-var-run\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.674928 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9tdb\" (UniqueName: \"kubernetes.io/projected/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-kube-api-access-p9tdb\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.674968 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6afb5f16-3806-4fbf-becf-8bf66576286f-scripts\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.675026 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-var-lib\") pod \"ovn-controller-ovs-hbfsj\" (UID: 
\"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.675045 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-etc-ovs\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.675075 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6afb5f16-3806-4fbf-becf-8bf66576286f-var-log-ovn\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.675115 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6afb5f16-3806-4fbf-becf-8bf66576286f-var-run-ovn\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.675150 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-scripts\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.675220 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-var-log\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.675266 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6afb5f16-3806-4fbf-becf-8bf66576286f-ovn-controller-tls-certs\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.675299 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-var-run\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.777217 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-var-run\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.777993 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6afb5f16-3806-4fbf-becf-8bf66576286f-combined-ca-bundle\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" 
Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.778048 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vzkv\" (UniqueName: \"kubernetes.io/projected/6afb5f16-3806-4fbf-becf-8bf66576286f-kube-api-access-4vzkv\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.778089 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6afb5f16-3806-4fbf-becf-8bf66576286f-var-run\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.778118 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9tdb\" (UniqueName: \"kubernetes.io/projected/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-kube-api-access-p9tdb\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.778155 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6afb5f16-3806-4fbf-becf-8bf66576286f-scripts\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.779436 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-var-lib\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.779665 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-var-run\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.781213 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6afb5f16-3806-4fbf-becf-8bf66576286f-scripts\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.781643 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6afb5f16-3806-4fbf-becf-8bf66576286f-var-run\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.781725 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-var-lib\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.781765 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-etc-ovs\") pod 
\"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.781839 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6afb5f16-3806-4fbf-becf-8bf66576286f-var-log-ovn\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.783401 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6afb5f16-3806-4fbf-becf-8bf66576286f-var-log-ovn\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.783821 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-etc-ovs\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.783976 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6afb5f16-3806-4fbf-becf-8bf66576286f-var-run-ovn\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.784039 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-scripts\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.784109 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-var-log\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.784190 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6afb5f16-3806-4fbf-becf-8bf66576286f-ovn-controller-tls-certs\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.784746 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6afb5f16-3806-4fbf-becf-8bf66576286f-var-run-ovn\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.784889 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-var-log\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.790147 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6afb5f16-3806-4fbf-becf-8bf66576286f-combined-ca-bundle\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.792635 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-scripts\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.795262 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6afb5f16-3806-4fbf-becf-8bf66576286f-ovn-controller-tls-certs\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.798525 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vzkv\" (UniqueName: \"kubernetes.io/projected/6afb5f16-3806-4fbf-becf-8bf66576286f-kube-api-access-4vzkv\") pod \"ovn-controller-2ckfn\" (UID: \"6afb5f16-3806-4fbf-becf-8bf66576286f\") " pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.801648 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9tdb\" (UniqueName: \"kubernetes.io/projected/a1df22cc-e9c1-48b2-a807-ea8e93f8d366-kube-api-access-p9tdb\") pod \"ovn-controller-ovs-hbfsj\" (UID: \"a1df22cc-e9c1-48b2-a807-ea8e93f8d366\") " pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.923292 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:08 crc kubenswrapper[5133]: I1121 14:02:08.938905 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.070620 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.072196 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.074919 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.075175 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.075341 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.079536 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-pvxgl" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.079971 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.092557 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.193120 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-config\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.193202 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.193233 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckj2d\" (UniqueName: \"kubernetes.io/projected/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-kube-api-access-ckj2d\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.193934 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.193973 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.194035 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.194068 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.194097 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.295718 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.295816 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.295878 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.295922 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-config\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.295976 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.296020 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckj2d\" (UniqueName: \"kubernetes.io/projected/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-kube-api-access-ckj2d\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.296452 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.296464 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 
14:02:09.296554 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.296842 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-config\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.296923 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.297842 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.301415 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.314772 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckj2d\" (UniqueName: \"kubernetes.io/projected/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-kube-api-access-ckj2d\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.333319 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.350895 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e94f41c-a61c-4502-b9c6-11bcaf9054dd-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.361324 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-nb-0\" (UID: \"2e94f41c-a61c-4502-b9c6-11bcaf9054dd\") " pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:09 crc kubenswrapper[5133]: I1121 14:02:09.396887 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.583225 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.585540 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.588482 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-nwbzq" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.589137 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.589342 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.589681 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.597592 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.708503 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/799b923b-f086-45a3-b88d-01c78ab3b1f0-config\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.708590 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/799b923b-f086-45a3-b88d-01c78ab3b1f0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.708636 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/799b923b-f086-45a3-b88d-01c78ab3b1f0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.708661 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/799b923b-f086-45a3-b88d-01c78ab3b1f0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.708715 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.708772 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799b923b-f086-45a3-b88d-01c78ab3b1f0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 
14:02:12.708826 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/799b923b-f086-45a3-b88d-01c78ab3b1f0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.708896 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8r57\" (UniqueName: \"kubernetes.io/projected/799b923b-f086-45a3-b88d-01c78ab3b1f0-kube-api-access-h8r57\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.811074 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/799b923b-f086-45a3-b88d-01c78ab3b1f0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.811192 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8r57\" (UniqueName: \"kubernetes.io/projected/799b923b-f086-45a3-b88d-01c78ab3b1f0-kube-api-access-h8r57\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.811238 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/799b923b-f086-45a3-b88d-01c78ab3b1f0-config\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.811268 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/799b923b-f086-45a3-b88d-01c78ab3b1f0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.811297 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/799b923b-f086-45a3-b88d-01c78ab3b1f0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.811315 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/799b923b-f086-45a3-b88d-01c78ab3b1f0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.811342 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.811365 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/799b923b-f086-45a3-b88d-01c78ab3b1f0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.812091 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.812642 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/799b923b-f086-45a3-b88d-01c78ab3b1f0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.813358 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/799b923b-f086-45a3-b88d-01c78ab3b1f0-config\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.814158 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/799b923b-f086-45a3-b88d-01c78ab3b1f0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.830231 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/799b923b-f086-45a3-b88d-01c78ab3b1f0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.831725 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/799b923b-f086-45a3-b88d-01c78ab3b1f0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.833430 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/799b923b-f086-45a3-b88d-01c78ab3b1f0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.848677 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8r57\" (UniqueName: \"kubernetes.io/projected/799b923b-f086-45a3-b88d-01c78ab3b1f0-kube-api-access-h8r57\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.863362 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-sb-0\" (UID: \"799b923b-f086-45a3-b88d-01c78ab3b1f0\") " pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:12 crc kubenswrapper[5133]: I1121 14:02:12.924893 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:17 crc kubenswrapper[5133]: I1121 14:02:17.989784 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"507698df-bffb-4ed6-bbad-7e62bb1875f7","Type":"ContainerStarted","Data":"e76898f860aaf18f836ecb3b02128b5cc0e387713ad0f2e540902cfdbcc40132"} Nov 21 14:02:21 crc kubenswrapper[5133]: E1121 14:02:21.953592 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 21 14:02:21 crc kubenswrapper[5133]: E1121 14:02:21.954732 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-22d9n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(9aa1caed-f687-4526-a851-59b4d192b705): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 
21 14:02:21 crc kubenswrapper[5133]: E1121 14:02:21.955976 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="9aa1caed-f687-4526-a851-59b4d192b705" Nov 21 14:02:22 crc kubenswrapper[5133]: E1121 14:02:22.005349 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 21 14:02:22 crc kubenswrapper[5133]: E1121 14:02:22.005600 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f4scz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(2c9873f2-025e-499f-8a76-47c38495fd75): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 
14:02:22 crc kubenswrapper[5133]: E1121 14:02:22.006915 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="2c9873f2-025e-499f-8a76-47c38495fd75" Nov 21 14:02:22 crc kubenswrapper[5133]: E1121 14:02:22.031944 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="9aa1caed-f687-4526-a851-59b4d192b705" Nov 21 14:02:22 crc kubenswrapper[5133]: E1121 14:02:22.032285 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="2c9873f2-025e-499f-8a76-47c38495fd75" Nov 21 14:02:23 crc kubenswrapper[5133]: I1121 14:02:23.313918 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:02:23 crc kubenswrapper[5133]: I1121 14:02:23.314462 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:02:23 crc kubenswrapper[5133]: E1121 14:02:23.814944 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 21 14:02:23 crc kubenswrapper[5133]: E1121 14:02:23.815302 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9sx5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(3afd96f5-effd-43e8-8986-b9fc1fd28233): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:02:23 crc kubenswrapper[5133]: E1121 14:02:23.817813 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="3afd96f5-effd-43e8-8986-b9fc1fd28233" Nov 21 14:02:24 crc kubenswrapper[5133]: E1121 14:02:24.067450 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="3afd96f5-effd-43e8-8986-b9fc1fd28233" Nov 21 14:02:29 crc kubenswrapper[5133]: I1121 14:02:29.126103 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.416355 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.417271 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* 
--conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lvgxq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-rdgf8_openstack(4097bd51-e085-4017-9ec5-cfbb86d1cc08): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.418789 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" podUID="4097bd51-e085-4017-9ec5-cfbb86d1cc08" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.474497 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.475213 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t7q2s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-drqnz_openstack(50c34142-ef58-4e11-acce-613c693cc483): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.476841 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" podUID="50c34142-ef58-4e11-acce-613c693cc483" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.492490 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.492758 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bggrg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-k5kck_openstack(86efb203-682b-4933-acc2-14cf239d4770): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.494185 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-k5kck" podUID="86efb203-682b-4933-acc2-14cf239d4770" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.525015 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.525289 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6pfgv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-82rwv_openstack(6a14f3d4-e942-46bd-9f9a-af43b1856214): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:02:29 crc kubenswrapper[5133]: E1121 14:02:29.527132 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" podUID="6a14f3d4-e942-46bd-9f9a-af43b1856214" Nov 21 14:02:29 crc kubenswrapper[5133]: W1121 14:02:29.884563 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod866a2674_f006_4883_9598_6902879561e6.slice/crio-a6da2f6d0dae395dd621890330b00cebafe58ec3e03e3eb3d43d33d2eb1e2aaa WatchSource:0}: Error finding container a6da2f6d0dae395dd621890330b00cebafe58ec3e03e3eb3d43d33d2eb1e2aaa: Status 404 returned error can't find the container with id a6da2f6d0dae395dd621890330b00cebafe58ec3e03e3eb3d43d33d2eb1e2aaa Nov 21 14:02:29 crc kubenswrapper[5133]: I1121 14:02:29.886464 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 14:02:29 crc kubenswrapper[5133]: I1121 14:02:29.984518 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2ckfn"] Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.074479 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 21 14:02:30 crc kubenswrapper[5133]: W1121 14:02:30.078049 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e94f41c_a61c_4502_b9c6_11bcaf9054dd.slice/crio-d2e251735789004be4c601bcdf7851dfbebd687cbb36d4e20c25709521d24d66 WatchSource:0}: Error finding container d2e251735789004be4c601bcdf7851dfbebd687cbb36d4e20c25709521d24d66: Status 404 returned error can't find the 
container with id d2e251735789004be4c601bcdf7851dfbebd687cbb36d4e20c25709521d24d66 Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.128572 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"799b923b-f086-45a3-b88d-01c78ab3b1f0","Type":"ContainerStarted","Data":"a2e8991c017bed24be1de96f14c7a1a9771cfcf650a45091adafe50b6685844d"} Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.132970 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"866a2674-f006-4883-9598-6902879561e6","Type":"ContainerStarted","Data":"a6da2f6d0dae395dd621890330b00cebafe58ec3e03e3eb3d43d33d2eb1e2aaa"} Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.134971 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"507698df-bffb-4ed6-bbad-7e62bb1875f7","Type":"ContainerStarted","Data":"e82fe51e7e098f7d8a66927fd6ecd18ee5655181d5c819a5433954677571159f"} Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.135088 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.136443 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"65613fe9-4492-410c-9be2-2d01dcb1e085","Type":"ContainerStarted","Data":"27d322bf37793ba0fd9cbad7591955eb284e92c6cb7f75304750f22a19c39ce7"} Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.138814 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2ckfn" event={"ID":"6afb5f16-3806-4fbf-becf-8bf66576286f","Type":"ContainerStarted","Data":"8351ac82f2a590c55877604140b52d2be9c8587b4e528751fe91cb46ac372fb2"} Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.141174 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"2e94f41c-a61c-4502-b9c6-11bcaf9054dd","Type":"ContainerStarted","Data":"d2e251735789004be4c601bcdf7851dfbebd687cbb36d4e20c25709521d24d66"} Nov 21 14:02:30 crc kubenswrapper[5133]: E1121 14:02:30.145376 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-k5kck" podUID="86efb203-682b-4933-acc2-14cf239d4770" Nov 21 14:02:30 crc kubenswrapper[5133]: E1121 14:02:30.145853 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" podUID="50c34142-ef58-4e11-acce-613c693cc483" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.155101 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=14.986760203 podStartE2EDuration="27.155054997s" podCreationTimestamp="2025-11-21 14:02:03 +0000 UTC" firstStartedPulling="2025-11-21 14:02:17.282036514 +0000 UTC m=+1197.079868792" lastFinishedPulling="2025-11-21 14:02:29.450331298 +0000 UTC m=+1209.248163586" observedRunningTime="2025-11-21 14:02:30.155014715 +0000 UTC m=+1209.952846963" watchObservedRunningTime="2025-11-21 14:02:30.155054997 +0000 UTC m=+1209.952887235" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.497945 5133 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.552554 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.619618 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-config\") pod \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.619697 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-dns-svc\") pod \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.619852 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a14f3d4-e942-46bd-9f9a-af43b1856214-config\") pod \"6a14f3d4-e942-46bd-9f9a-af43b1856214\" (UID: \"6a14f3d4-e942-46bd-9f9a-af43b1856214\") " Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.620036 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvgxq\" (UniqueName: \"kubernetes.io/projected/4097bd51-e085-4017-9ec5-cfbb86d1cc08-kube-api-access-lvgxq\") pod \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\" (UID: \"4097bd51-e085-4017-9ec5-cfbb86d1cc08\") " Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.620070 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pfgv\" (UniqueName: \"kubernetes.io/projected/6a14f3d4-e942-46bd-9f9a-af43b1856214-kube-api-access-6pfgv\") pod \"6a14f3d4-e942-46bd-9f9a-af43b1856214\" (UID: \"6a14f3d4-e942-46bd-9f9a-af43b1856214\") " Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.620456 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4097bd51-e085-4017-9ec5-cfbb86d1cc08" (UID: "4097bd51-e085-4017-9ec5-cfbb86d1cc08"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.620558 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a14f3d4-e942-46bd-9f9a-af43b1856214-config" (OuterVolumeSpecName: "config") pod "6a14f3d4-e942-46bd-9f9a-af43b1856214" (UID: "6a14f3d4-e942-46bd-9f9a-af43b1856214"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.621087 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.621110 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a14f3d4-e942-46bd-9f9a-af43b1856214-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.621863 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-config" (OuterVolumeSpecName: "config") pod "4097bd51-e085-4017-9ec5-cfbb86d1cc08" (UID: "4097bd51-e085-4017-9ec5-cfbb86d1cc08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.627831 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a14f3d4-e942-46bd-9f9a-af43b1856214-kube-api-access-6pfgv" (OuterVolumeSpecName: "kube-api-access-6pfgv") pod "6a14f3d4-e942-46bd-9f9a-af43b1856214" (UID: "6a14f3d4-e942-46bd-9f9a-af43b1856214"). InnerVolumeSpecName "kube-api-access-6pfgv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.632600 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4097bd51-e085-4017-9ec5-cfbb86d1cc08-kube-api-access-lvgxq" (OuterVolumeSpecName: "kube-api-access-lvgxq") pod "4097bd51-e085-4017-9ec5-cfbb86d1cc08" (UID: "4097bd51-e085-4017-9ec5-cfbb86d1cc08"). InnerVolumeSpecName "kube-api-access-lvgxq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.723029 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvgxq\" (UniqueName: \"kubernetes.io/projected/4097bd51-e085-4017-9ec5-cfbb86d1cc08-kube-api-access-lvgxq\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.723277 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pfgv\" (UniqueName: \"kubernetes.io/projected/6a14f3d4-e942-46bd-9f9a-af43b1856214-kube-api-access-6pfgv\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.723361 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4097bd51-e085-4017-9ec5-cfbb86d1cc08-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:30 crc kubenswrapper[5133]: I1121 14:02:30.941891 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hbfsj"] Nov 21 14:02:31 crc kubenswrapper[5133]: I1121 14:02:31.154275 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" event={"ID":"6a14f3d4-e942-46bd-9f9a-af43b1856214","Type":"ContainerDied","Data":"cdb28e5f9dc955a553ec7fd2eb87aa9de60287435b56f6287efce6f7d42a2e2d"} Nov 21 14:02:31 crc kubenswrapper[5133]: I1121 14:02:31.154325 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-82rwv" Nov 21 14:02:31 crc kubenswrapper[5133]: I1121 14:02:31.159440 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" event={"ID":"4097bd51-e085-4017-9ec5-cfbb86d1cc08","Type":"ContainerDied","Data":"bf4d0338aa894406f66627466c1f1fdbcff51a22f424ea690cb88987a7f9409c"} Nov 21 14:02:31 crc kubenswrapper[5133]: I1121 14:02:31.159530 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-rdgf8" Nov 21 14:02:31 crc kubenswrapper[5133]: I1121 14:02:31.210732 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-82rwv"] Nov 21 14:02:31 crc kubenswrapper[5133]: I1121 14:02:31.217121 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-82rwv"] Nov 21 14:02:31 crc kubenswrapper[5133]: I1121 14:02:31.253699 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rdgf8"] Nov 21 14:02:31 crc kubenswrapper[5133]: I1121 14:02:31.287450 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-rdgf8"] Nov 21 14:02:32 crc kubenswrapper[5133]: I1121 14:02:32.170901 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hbfsj" event={"ID":"a1df22cc-e9c1-48b2-a807-ea8e93f8d366","Type":"ContainerStarted","Data":"7f1f7256bab5ba011a5b5dfec3a179146af153072a0f3790eca69b4eafb9f9d8"} Nov 21 14:02:32 crc kubenswrapper[5133]: I1121 14:02:32.472153 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4097bd51-e085-4017-9ec5-cfbb86d1cc08" path="/var/lib/kubelet/pods/4097bd51-e085-4017-9ec5-cfbb86d1cc08/volumes" Nov 21 14:02:32 crc kubenswrapper[5133]: I1121 14:02:32.472564 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a14f3d4-e942-46bd-9f9a-af43b1856214" path="/var/lib/kubelet/pods/6a14f3d4-e942-46bd-9f9a-af43b1856214/volumes" Nov 21 14:02:34 crc kubenswrapper[5133]: I1121 14:02:34.199116 5133 generic.go:334] "Generic (PLEG): container finished" podID="65613fe9-4492-410c-9be2-2d01dcb1e085" containerID="27d322bf37793ba0fd9cbad7591955eb284e92c6cb7f75304750f22a19c39ce7" exitCode=0 Nov 21 14:02:34 crc kubenswrapper[5133]: I1121 14:02:34.199698 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"65613fe9-4492-410c-9be2-2d01dcb1e085","Type":"ContainerDied","Data":"27d322bf37793ba0fd9cbad7591955eb284e92c6cb7f75304750f22a19c39ce7"} Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.215373 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2ckfn" event={"ID":"6afb5f16-3806-4fbf-becf-8bf66576286f","Type":"ContainerStarted","Data":"218d3a8d01b4e59c3d483f45e59d7f80af450db8ca98400bb70c55019cd23a9e"} Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.215930 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-2ckfn" Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.219730 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"2e94f41c-a61c-4502-b9c6-11bcaf9054dd","Type":"ContainerStarted","Data":"54cc1ff0e268604ed6c8cd8ef440e1da873254c0be3bb08a08fddcf502ea5621"} Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.222327 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"799b923b-f086-45a3-b88d-01c78ab3b1f0","Type":"ContainerStarted","Data":"b1c1c679eda8892975331d8a035d8589578fb0452ac503f9a061ec5afc93061a"} Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.224025 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"866a2674-f006-4883-9598-6902879561e6","Type":"ContainerStarted","Data":"80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800"} Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.224298 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.225830 5133 generic.go:334] "Generic (PLEG): container finished" podID="a1df22cc-e9c1-48b2-a807-ea8e93f8d366" containerID="6c179f8bd795cda5da87b6835da922ce3b6099ead2f0560f9803b35d70e4a59d" exitCode=0 Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.227174 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hbfsj" event={"ID":"a1df22cc-e9c1-48b2-a807-ea8e93f8d366","Type":"ContainerDied","Data":"6c179f8bd795cda5da87b6835da922ce3b6099ead2f0560f9803b35d70e4a59d"} Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.231227 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"65613fe9-4492-410c-9be2-2d01dcb1e085","Type":"ContainerStarted","Data":"6b98443eb3e8f8e0463fe74225fcfd0ec37a7b617f91656e4e2b005f1bfe70c2"} Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.239319 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-2ckfn" podStartSLOduration=23.295227021 podStartE2EDuration="27.239293165s" podCreationTimestamp="2025-11-21 14:02:08 +0000 UTC" firstStartedPulling="2025-11-21 14:02:30.009140594 +0000 UTC m=+1209.806972842" lastFinishedPulling="2025-11-21 14:02:33.953206738 +0000 UTC m=+1213.751038986" observedRunningTime="2025-11-21 14:02:35.235214096 +0000 UTC m=+1215.033046354" watchObservedRunningTime="2025-11-21 14:02:35.239293165 +0000 UTC m=+1215.037125413" Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.293439 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=25.556092463 podStartE2EDuration="30.293413213s" podCreationTimestamp="2025-11-21 14:02:05 +0000 UTC" firstStartedPulling="2025-11-21 14:02:29.887276165 +0000 UTC m=+1209.685108413" lastFinishedPulling="2025-11-21 14:02:34.624596915 +0000 UTC m=+1214.422429163" observedRunningTime="2025-11-21 14:02:35.282598773 +0000 UTC m=+1215.080431031" watchObservedRunningTime="2025-11-21 14:02:35.293413213 +0000 UTC m=+1215.091245471" Nov 21 14:02:35 crc kubenswrapper[5133]: I1121 14:02:35.315278 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=9.090557856 podStartE2EDuration="34.315250537s" podCreationTimestamp="2025-11-21 14:02:01 +0000 UTC" firstStartedPulling="2025-11-21 14:02:04.204669147 +0000 UTC m=+1184.002501395" lastFinishedPulling="2025-11-21 14:02:29.429361798 +0000 UTC m=+1209.227194076" observedRunningTime="2025-11-21 14:02:35.306838992 +0000 UTC m=+1215.104671280" watchObservedRunningTime="2025-11-21 14:02:35.315250537 +0000 UTC m=+1215.113082785" Nov 21 14:02:36 crc kubenswrapper[5133]: I1121 14:02:36.254024 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hbfsj" 
event={"ID":"a1df22cc-e9c1-48b2-a807-ea8e93f8d366","Type":"ContainerStarted","Data":"50dbb2642768782a90f98812e9a6f161005f4bfe53d13e0def8a7a72be9cf94b"} Nov 21 14:02:36 crc kubenswrapper[5133]: I1121 14:02:36.254528 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hbfsj" event={"ID":"a1df22cc-e9c1-48b2-a807-ea8e93f8d366","Type":"ContainerStarted","Data":"0238af0e97b2f7e4fe113c0c71e62349b3ee63f4a547cd84f1646b4caf1bff84"} Nov 21 14:02:36 crc kubenswrapper[5133]: I1121 14:02:36.283480 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-hbfsj" podStartSLOduration=25.73000144 podStartE2EDuration="28.283452472s" podCreationTimestamp="2025-11-21 14:02:08 +0000 UTC" firstStartedPulling="2025-11-21 14:02:31.369428835 +0000 UTC m=+1211.167261083" lastFinishedPulling="2025-11-21 14:02:33.922879867 +0000 UTC m=+1213.720712115" observedRunningTime="2025-11-21 14:02:36.282608879 +0000 UTC m=+1216.080441127" watchObservedRunningTime="2025-11-21 14:02:36.283452472 +0000 UTC m=+1216.081284720" Nov 21 14:02:37 crc kubenswrapper[5133]: I1121 14:02:37.266332 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:37 crc kubenswrapper[5133]: I1121 14:02:37.266875 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:02:38 crc kubenswrapper[5133]: I1121 14:02:38.280201 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"3afd96f5-effd-43e8-8986-b9fc1fd28233","Type":"ContainerStarted","Data":"8b60192a426f1016f501d0ff258c988f0b52978ea40f48665c29814acc9cf64d"} Nov 21 14:02:38 crc kubenswrapper[5133]: I1121 14:02:38.283872 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"799b923b-f086-45a3-b88d-01c78ab3b1f0","Type":"ContainerStarted","Data":"10305a7c3efd76e92dcf1196e3838737a67e738cfe029d456a994727cefda111"} Nov 21 14:02:38 crc kubenswrapper[5133]: I1121 14:02:38.286694 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"2e94f41c-a61c-4502-b9c6-11bcaf9054dd","Type":"ContainerStarted","Data":"6353380893ce680e5e22feab8c678dd7617064d963f148a3b393b423d7d99ddf"} Nov 21 14:02:38 crc kubenswrapper[5133]: I1121 14:02:38.364206 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=19.42176164 podStartE2EDuration="27.364179271s" podCreationTimestamp="2025-11-21 14:02:11 +0000 UTC" firstStartedPulling="2025-11-21 14:02:29.423360287 +0000 UTC m=+1209.221192575" lastFinishedPulling="2025-11-21 14:02:37.365777958 +0000 UTC m=+1217.163610206" observedRunningTime="2025-11-21 14:02:38.355470638 +0000 UTC m=+1218.153302906" watchObservedRunningTime="2025-11-21 14:02:38.364179271 +0000 UTC m=+1218.162011519" Nov 21 14:02:38 crc kubenswrapper[5133]: I1121 14:02:38.389887 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=23.069513425 podStartE2EDuration="30.389863758s" podCreationTimestamp="2025-11-21 14:02:08 +0000 UTC" firstStartedPulling="2025-11-21 14:02:30.080923264 +0000 UTC m=+1209.878755512" lastFinishedPulling="2025-11-21 14:02:37.401273597 +0000 UTC m=+1217.199105845" observedRunningTime="2025-11-21 14:02:38.387408812 +0000 UTC m=+1218.185241060" watchObservedRunningTime="2025-11-21 14:02:38.389863758 +0000 UTC 
m=+1218.187696006" Nov 21 14:02:38 crc kubenswrapper[5133]: I1121 14:02:38.641139 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 21 14:02:39 crc kubenswrapper[5133]: I1121 14:02:39.310329 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9aa1caed-f687-4526-a851-59b4d192b705","Type":"ContainerStarted","Data":"12333d78e0e6447e201e2ddaf5c5f5c05242a23fc2120dc8a1be00a6d2499df8"} Nov 21 14:02:39 crc kubenswrapper[5133]: I1121 14:02:39.319099 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2c9873f2-025e-499f-8a76-47c38495fd75","Type":"ContainerStarted","Data":"7743d3ae5f3d1730d9ae8514c383685ab59cb65808a01679f0e73dc16f30e218"} Nov 21 14:02:39 crc kubenswrapper[5133]: I1121 14:02:39.397720 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:39 crc kubenswrapper[5133]: I1121 14:02:39.397782 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:39 crc kubenswrapper[5133]: I1121 14:02:39.436916 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:39 crc kubenswrapper[5133]: I1121 14:02:39.925066 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:39 crc kubenswrapper[5133]: I1121 14:02:39.973832 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.325628 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.378450 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.379732 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.573113 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drqnz"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.608582 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-pbxzg"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.610130 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.615492 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.642290 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-pbxzg"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.653626 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-xnvhj"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.657633 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.669720 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.717805 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-xnvhj"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.762573 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77732c59-506b-4d76-9f92-efe5357ed5e9-combined-ca-bundle\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.762645 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-config\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.762702 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zqbz\" (UniqueName: \"kubernetes.io/projected/20185974-caab-4948-ae2b-159ad95e3f21-kube-api-access-6zqbz\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.762784 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/77732c59-506b-4d76-9f92-efe5357ed5e9-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.762821 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/77732c59-506b-4d76-9f92-efe5357ed5e9-ovn-rundir\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.762864 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.762893 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/77732c59-506b-4d76-9f92-efe5357ed5e9-ovs-rundir\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.762918 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7lwd\" (UniqueName: 
\"kubernetes.io/projected/77732c59-506b-4d76-9f92-efe5357ed5e9-kube-api-access-c7lwd\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.762947 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77732c59-506b-4d76-9f92-efe5357ed5e9-config\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.763053 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.779644 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-k5kck"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.819102 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.821214 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.833189 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-l89zp" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.833402 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.833522 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.835979 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-bdzkx"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.838092 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.838108 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.844069 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.847174 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.864824 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-bdzkx"] Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.865967 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/77732c59-506b-4d76-9f92-efe5357ed5e9-ovn-rundir\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.867543 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.867595 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/77732c59-506b-4d76-9f92-efe5357ed5e9-ovs-rundir\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.867623 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7lwd\" (UniqueName: \"kubernetes.io/projected/77732c59-506b-4d76-9f92-efe5357ed5e9-kube-api-access-c7lwd\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.867649 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77732c59-506b-4d76-9f92-efe5357ed5e9-config\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.867705 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.867744 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77732c59-506b-4d76-9f92-efe5357ed5e9-combined-ca-bundle\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.867796 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-config\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.867843 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zqbz\" (UniqueName: \"kubernetes.io/projected/20185974-caab-4948-ae2b-159ad95e3f21-kube-api-access-6zqbz\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.867865 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/77732c59-506b-4d76-9f92-efe5357ed5e9-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.869493 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/77732c59-506b-4d76-9f92-efe5357ed5e9-ovn-rundir\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.870239 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.870804 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.870879 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/77732c59-506b-4d76-9f92-efe5357ed5e9-ovs-rundir\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.871131 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-config\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.875370 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77732c59-506b-4d76-9f92-efe5357ed5e9-config\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.883391 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77732c59-506b-4d76-9f92-efe5357ed5e9-combined-ca-bundle\") pod \"ovn-controller-metrics-xnvhj\" 
(UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.892694 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/77732c59-506b-4d76-9f92-efe5357ed5e9-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.899011 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zqbz\" (UniqueName: \"kubernetes.io/projected/20185974-caab-4948-ae2b-159ad95e3f21-kube-api-access-6zqbz\") pod \"dnsmasq-dns-5bf47b49b7-pbxzg\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.929185 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7lwd\" (UniqueName: \"kubernetes.io/projected/77732c59-506b-4d76-9f92-efe5357ed5e9-kube-api-access-c7lwd\") pod \"ovn-controller-metrics-xnvhj\" (UID: \"77732c59-506b-4d76-9f92-efe5357ed5e9\") " pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.955307 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973446 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973500 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gchfh\" (UniqueName: \"kubernetes.io/projected/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-kube-api-access-gchfh\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973637 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973673 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-config\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973704 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-dns-svc\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973730 5133 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973784 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-scripts\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973807 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-config\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973861 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7qkn\" (UniqueName: \"kubernetes.io/projected/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-kube-api-access-c7qkn\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973884 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973930 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.973954 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:40 crc kubenswrapper[5133]: I1121 14:02:40.985809 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-xnvhj" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.076759 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.076803 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7qkn\" (UniqueName: \"kubernetes.io/projected/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-kube-api-access-c7qkn\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.077444 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.077712 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.077750 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.077862 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.077880 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gchfh\" (UniqueName: \"kubernetes.io/projected/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-kube-api-access-gchfh\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.078045 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.078074 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-config\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.078108 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-dns-svc\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.078124 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.078217 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-config\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.078241 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-scripts\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.079156 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-scripts\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.079643 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.080931 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-config\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.081218 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.081644 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-dns-svc\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.082876 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.084396 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-config\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.090790 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.091560 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.091819 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.112035 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gchfh\" (UniqueName: \"kubernetes.io/projected/ac4c9909-c38c-4076-a8a5-ee3aa66a9630-kube-api-access-gchfh\") pod \"ovn-northd-0\" (UID: \"ac4c9909-c38c-4076-a8a5-ee3aa66a9630\") " pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.113158 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7qkn\" (UniqueName: \"kubernetes.io/projected/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-kube-api-access-c7qkn\") pod \"dnsmasq-dns-8554648995-bdzkx\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.174585 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.183513 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-dns-svc\") pod \"50c34142-ef58-4e11-acce-613c693cc483\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.183785 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-config\") pod \"50c34142-ef58-4e11-acce-613c693cc483\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.183824 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7q2s\" (UniqueName: \"kubernetes.io/projected/50c34142-ef58-4e11-acce-613c693cc483-kube-api-access-t7q2s\") pod \"50c34142-ef58-4e11-acce-613c693cc483\" (UID: \"50c34142-ef58-4e11-acce-613c693cc483\") " Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.185093 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "50c34142-ef58-4e11-acce-613c693cc483" (UID: "50c34142-ef58-4e11-acce-613c693cc483"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.187540 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-config" (OuterVolumeSpecName: "config") pod "50c34142-ef58-4e11-acce-613c693cc483" (UID: "50c34142-ef58-4e11-acce-613c693cc483"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.194069 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50c34142-ef58-4e11-acce-613c693cc483-kube-api-access-t7q2s" (OuterVolumeSpecName: "kube-api-access-t7q2s") pod "50c34142-ef58-4e11-acce-613c693cc483" (UID: "50c34142-ef58-4e11-acce-613c693cc483"). InnerVolumeSpecName "kube-api-access-t7q2s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.236413 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.286140 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.286182 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7q2s\" (UniqueName: \"kubernetes.io/projected/50c34142-ef58-4e11-acce-613c693cc483-kube-api-access-t7q2s\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.286197 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50c34142-ef58-4e11-acce-613c693cc483-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.337940 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" event={"ID":"50c34142-ef58-4e11-acce-613c693cc483","Type":"ContainerDied","Data":"0b5e38935b8567077df4bfa162a1bc8239cd3a969ba65d96045d76a83f672d42"} Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.338077 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-drqnz" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.356260 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-k5kck" event={"ID":"86efb203-682b-4933-acc2-14cf239d4770","Type":"ContainerDied","Data":"494dbcf89e7ed85ff68e9931b6f79e7fddb8130e1fe87af84a074f36a6479c08"} Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.356283 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-k5kck" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.360886 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.387255 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bggrg\" (UniqueName: \"kubernetes.io/projected/86efb203-682b-4933-acc2-14cf239d4770-kube-api-access-bggrg\") pod \"86efb203-682b-4933-acc2-14cf239d4770\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.387369 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-config\") pod \"86efb203-682b-4933-acc2-14cf239d4770\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.387471 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-dns-svc\") pod \"86efb203-682b-4933-acc2-14cf239d4770\" (UID: \"86efb203-682b-4933-acc2-14cf239d4770\") " Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.388598 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "86efb203-682b-4933-acc2-14cf239d4770" (UID: "86efb203-682b-4933-acc2-14cf239d4770"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.389798 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-config" (OuterVolumeSpecName: "config") pod "86efb203-682b-4933-acc2-14cf239d4770" (UID: "86efb203-682b-4933-acc2-14cf239d4770"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.398410 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86efb203-682b-4933-acc2-14cf239d4770-kube-api-access-bggrg" (OuterVolumeSpecName: "kube-api-access-bggrg") pod "86efb203-682b-4933-acc2-14cf239d4770" (UID: "86efb203-682b-4933-acc2-14cf239d4770"). InnerVolumeSpecName "kube-api-access-bggrg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.408058 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drqnz"] Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.415965 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-drqnz"] Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.465601 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-pbxzg"] Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.496415 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bggrg\" (UniqueName: \"kubernetes.io/projected/86efb203-682b-4933-acc2-14cf239d4770-kube-api-access-bggrg\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.496458 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.496488 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/86efb203-682b-4933-acc2-14cf239d4770-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.543792 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-xnvhj"] Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.803717 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.817292 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-k5kck"] Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.823803 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-k5kck"] Nov 21 14:02:41 crc kubenswrapper[5133]: W1121 14:02:41.867042 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc34f5cc7_8dfa_48be_8392_0c4f8bb86208.slice/crio-0bf636752b964bd464281bee3e6c961b167323b1c91011d46c23d79228a6d7e0 WatchSource:0}: Error finding container 0bf636752b964bd464281bee3e6c961b167323b1c91011d46c23d79228a6d7e0: Status 404 returned error can't find the container with id 0bf636752b964bd464281bee3e6c961b167323b1c91011d46c23d79228a6d7e0 Nov 21 14:02:41 crc kubenswrapper[5133]: I1121 14:02:41.883741 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-bdzkx"] Nov 21 14:02:41 crc kubenswrapper[5133]: E1121 14:02:41.887845 5133 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.179:34972->38.102.83.179:33373: write tcp 38.102.83.179:34972->38.102.83.179:33373: write: broken pipe Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.366511 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-xnvhj" event={"ID":"77732c59-506b-4d76-9f92-efe5357ed5e9","Type":"ContainerStarted","Data":"e2ad8c8b7038e1f3e361bbfb104398265e48bbb63c51036b8bb93761c5401b2a"} Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.367106 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-xnvhj" 
event={"ID":"77732c59-506b-4d76-9f92-efe5357ed5e9","Type":"ContainerStarted","Data":"0a38d046f5afe674129860ad2cbf2128ab624a22a5b0b193751540d00a113586"} Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.367502 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-bdzkx" event={"ID":"c34f5cc7-8dfa-48be-8392-0c4f8bb86208","Type":"ContainerStarted","Data":"0bf636752b964bd464281bee3e6c961b167323b1c91011d46c23d79228a6d7e0"} Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.368869 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ac4c9909-c38c-4076-a8a5-ee3aa66a9630","Type":"ContainerStarted","Data":"d55d67102e456d4dbb052048193770ce01e2e70079a9ba8047a5eec667069f12"} Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.371849 5133 generic.go:334] "Generic (PLEG): container finished" podID="20185974-caab-4948-ae2b-159ad95e3f21" containerID="40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89" exitCode=0 Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.372297 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" event={"ID":"20185974-caab-4948-ae2b-159ad95e3f21","Type":"ContainerDied","Data":"40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89"} Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.372350 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" event={"ID":"20185974-caab-4948-ae2b-159ad95e3f21","Type":"ContainerStarted","Data":"08671c86a502027478a180a337528ed5979888f73ced905ecccec899c70764ce"} Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.391719 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-xnvhj" podStartSLOduration=2.391695958 podStartE2EDuration="2.391695958s" podCreationTimestamp="2025-11-21 14:02:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:02:42.391436101 +0000 UTC m=+1222.189268349" watchObservedRunningTime="2025-11-21 14:02:42.391695958 +0000 UTC m=+1222.189528206" Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.503604 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50c34142-ef58-4e11-acce-613c693cc483" path="/var/lib/kubelet/pods/50c34142-ef58-4e11-acce-613c693cc483/volumes" Nov 21 14:02:42 crc kubenswrapper[5133]: I1121 14:02:42.504657 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86efb203-682b-4933-acc2-14cf239d4770" path="/var/lib/kubelet/pods/86efb203-682b-4933-acc2-14cf239d4770/volumes" Nov 21 14:02:43 crc kubenswrapper[5133]: I1121 14:02:43.382703 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" event={"ID":"20185974-caab-4948-ae2b-159ad95e3f21","Type":"ContainerStarted","Data":"b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9"} Nov 21 14:02:43 crc kubenswrapper[5133]: I1121 14:02:43.383149 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:43 crc kubenswrapper[5133]: I1121 14:02:43.384332 5133 generic.go:334] "Generic (PLEG): container finished" podID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerID="23d1ee36620c45b12f8b35d52d2d00eea2e8cb970f3bdbbce06d63129d84e14f" exitCode=0 Nov 21 14:02:43 crc kubenswrapper[5133]: I1121 14:02:43.384397 5133 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-bdzkx" event={"ID":"c34f5cc7-8dfa-48be-8392-0c4f8bb86208","Type":"ContainerDied","Data":"23d1ee36620c45b12f8b35d52d2d00eea2e8cb970f3bdbbce06d63129d84e14f"} Nov 21 14:02:43 crc kubenswrapper[5133]: I1121 14:02:43.390229 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ac4c9909-c38c-4076-a8a5-ee3aa66a9630","Type":"ContainerStarted","Data":"df6006834cd8255e1a0e37d2f65f38f11eb5ce3d674b4a3bc0994a3c1ae6ad2e"} Nov 21 14:02:43 crc kubenswrapper[5133]: I1121 14:02:43.407630 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" podStartSLOduration=2.8556941179999997 podStartE2EDuration="3.407610209s" podCreationTimestamp="2025-11-21 14:02:40 +0000 UTC" firstStartedPulling="2025-11-21 14:02:41.490053553 +0000 UTC m=+1221.287885801" lastFinishedPulling="2025-11-21 14:02:42.041969644 +0000 UTC m=+1221.839801892" observedRunningTime="2025-11-21 14:02:43.402102262 +0000 UTC m=+1223.199934530" watchObservedRunningTime="2025-11-21 14:02:43.407610209 +0000 UTC m=+1223.205442457" Nov 21 14:02:43 crc kubenswrapper[5133]: I1121 14:02:43.525117 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:43 crc kubenswrapper[5133]: I1121 14:02:43.525463 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:43 crc kubenswrapper[5133]: I1121 14:02:43.760718 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:44 crc kubenswrapper[5133]: I1121 14:02:44.402970 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"ac4c9909-c38c-4076-a8a5-ee3aa66a9630","Type":"ContainerStarted","Data":"f8d438d1ceee1852c7df8c5962037231fe4d1d0e50f5b3b32e323994792bfd0f"} Nov 21 14:02:44 crc kubenswrapper[5133]: I1121 14:02:44.403447 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 21 14:02:44 crc kubenswrapper[5133]: I1121 14:02:44.405833 5133 generic.go:334] "Generic (PLEG): container finished" podID="3afd96f5-effd-43e8-8986-b9fc1fd28233" containerID="8b60192a426f1016f501d0ff258c988f0b52978ea40f48665c29814acc9cf64d" exitCode=0 Nov 21 14:02:44 crc kubenswrapper[5133]: I1121 14:02:44.405924 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"3afd96f5-effd-43e8-8986-b9fc1fd28233","Type":"ContainerDied","Data":"8b60192a426f1016f501d0ff258c988f0b52978ea40f48665c29814acc9cf64d"} Nov 21 14:02:44 crc kubenswrapper[5133]: I1121 14:02:44.410103 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-bdzkx" event={"ID":"c34f5cc7-8dfa-48be-8392-0c4f8bb86208","Type":"ContainerStarted","Data":"d1cf7b7258cb63743b20f8c49032ed6e0de6e4fe067e736963cddf65b034e063"} Nov 21 14:02:44 crc kubenswrapper[5133]: I1121 14:02:44.410542 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:44 crc kubenswrapper[5133]: I1121 14:02:44.429067 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.220166957 podStartE2EDuration="4.429027768s" podCreationTimestamp="2025-11-21 14:02:40 +0000 UTC" firstStartedPulling="2025-11-21 14:02:41.820482771 +0000 UTC 
m=+1221.618315019" lastFinishedPulling="2025-11-21 14:02:43.029343582 +0000 UTC m=+1222.827175830" observedRunningTime="2025-11-21 14:02:44.422179604 +0000 UTC m=+1224.220011882" watchObservedRunningTime="2025-11-21 14:02:44.429027768 +0000 UTC m=+1224.226860036" Nov 21 14:02:44 crc kubenswrapper[5133]: I1121 14:02:44.489272 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-bdzkx" podStartSLOduration=4.056976277 podStartE2EDuration="4.489230048s" podCreationTimestamp="2025-11-21 14:02:40 +0000 UTC" firstStartedPulling="2025-11-21 14:02:41.872288296 +0000 UTC m=+1221.670120544" lastFinishedPulling="2025-11-21 14:02:42.304542067 +0000 UTC m=+1222.102374315" observedRunningTime="2025-11-21 14:02:44.443385932 +0000 UTC m=+1224.241218190" watchObservedRunningTime="2025-11-21 14:02:44.489230048 +0000 UTC m=+1224.287062296" Nov 21 14:02:44 crc kubenswrapper[5133]: I1121 14:02:44.502433 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 21 14:02:45 crc kubenswrapper[5133]: I1121 14:02:45.404289 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 21 14:02:45 crc kubenswrapper[5133]: I1121 14:02:45.425271 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"3afd96f5-effd-43e8-8986-b9fc1fd28233","Type":"ContainerStarted","Data":"00162537ffbeaa5d7d0424dd23fd639206ef4ec9029c8429c4083051211e6b51"} Nov 21 14:02:45 crc kubenswrapper[5133]: I1121 14:02:45.480767 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371991.374039 podStartE2EDuration="45.480736795s" podCreationTimestamp="2025-11-21 14:02:00 +0000 UTC" firstStartedPulling="2025-11-21 14:02:02.445813585 +0000 UTC m=+1182.243645833" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:02:45.47158539 +0000 UTC m=+1225.269417648" watchObservedRunningTime="2025-11-21 14:02:45.480736795 +0000 UTC m=+1225.278569063" Nov 21 14:02:50 crc kubenswrapper[5133]: I1121 14:02:50.958428 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:51 crc kubenswrapper[5133]: I1121 14:02:51.363089 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:02:51 crc kubenswrapper[5133]: I1121 14:02:51.418885 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-pbxzg"] Nov 21 14:02:51 crc kubenswrapper[5133]: I1121 14:02:51.483783 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" podUID="20185974-caab-4948-ae2b-159ad95e3f21" containerName="dnsmasq-dns" containerID="cri-o://b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9" gracePeriod=10 Nov 21 14:02:51 crc kubenswrapper[5133]: I1121 14:02:51.875789 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 21 14:02:51 crc kubenswrapper[5133]: I1121 14:02:51.875868 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 21 14:02:51 crc kubenswrapper[5133]: I1121 14:02:51.966025 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 21 
14:02:51 crc kubenswrapper[5133]: I1121 14:02:51.983373 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.028572 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-ovsdbserver-nb\") pod \"20185974-caab-4948-ae2b-159ad95e3f21\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.028722 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zqbz\" (UniqueName: \"kubernetes.io/projected/20185974-caab-4948-ae2b-159ad95e3f21-kube-api-access-6zqbz\") pod \"20185974-caab-4948-ae2b-159ad95e3f21\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.028798 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-config\") pod \"20185974-caab-4948-ae2b-159ad95e3f21\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.028826 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-dns-svc\") pod \"20185974-caab-4948-ae2b-159ad95e3f21\" (UID: \"20185974-caab-4948-ae2b-159ad95e3f21\") " Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.049429 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20185974-caab-4948-ae2b-159ad95e3f21-kube-api-access-6zqbz" (OuterVolumeSpecName: "kube-api-access-6zqbz") pod "20185974-caab-4948-ae2b-159ad95e3f21" (UID: "20185974-caab-4948-ae2b-159ad95e3f21"). InnerVolumeSpecName "kube-api-access-6zqbz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.079302 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "20185974-caab-4948-ae2b-159ad95e3f21" (UID: "20185974-caab-4948-ae2b-159ad95e3f21"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.105012 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-config" (OuterVolumeSpecName: "config") pod "20185974-caab-4948-ae2b-159ad95e3f21" (UID: "20185974-caab-4948-ae2b-159ad95e3f21"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.112701 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "20185974-caab-4948-ae2b-159ad95e3f21" (UID: "20185974-caab-4948-ae2b-159ad95e3f21"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.132943 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.132983 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zqbz\" (UniqueName: \"kubernetes.io/projected/20185974-caab-4948-ae2b-159ad95e3f21-kube-api-access-6zqbz\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.133009 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.133021 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20185974-caab-4948-ae2b-159ad95e3f21-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.497903 5133 generic.go:334] "Generic (PLEG): container finished" podID="20185974-caab-4948-ae2b-159ad95e3f21" containerID="b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9" exitCode=0 Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.498186 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" event={"ID":"20185974-caab-4948-ae2b-159ad95e3f21","Type":"ContainerDied","Data":"b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9"} Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.498291 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" event={"ID":"20185974-caab-4948-ae2b-159ad95e3f21","Type":"ContainerDied","Data":"08671c86a502027478a180a337528ed5979888f73ced905ecccec899c70764ce"} Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.498321 5133 scope.go:117] "RemoveContainer" containerID="b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.498443 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-pbxzg" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.529395 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-pbxzg"] Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.548055 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-pbxzg"] Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.551276 5133 scope.go:117] "RemoveContainer" containerID="40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.573470 5133 scope.go:117] "RemoveContainer" containerID="b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9" Nov 21 14:02:52 crc kubenswrapper[5133]: E1121 14:02:52.575205 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9\": container with ID starting with b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9 not found: ID does not exist" containerID="b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.575401 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9"} err="failed to get container status \"b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9\": rpc error: code = NotFound desc = could not find container \"b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9\": container with ID starting with b54536c365d0de470f9c939720e25fdf838a0882a7dffa29c457ee932ab6e4d9 not found: ID does not exist" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.575687 5133 scope.go:117] "RemoveContainer" containerID="40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89" Nov 21 14:02:52 crc kubenswrapper[5133]: E1121 14:02:52.577151 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89\": container with ID starting with 40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89 not found: ID does not exist" containerID="40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.577184 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89"} err="failed to get container status \"40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89\": rpc error: code = NotFound desc = could not find container \"40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89\": container with ID starting with 40af7ee68b6d5eb7ac3c45c8a90630686ac508c4ef05fef4ae4377b7b34e2c89 not found: ID does not exist" Nov 21 14:02:52 crc kubenswrapper[5133]: I1121 14:02:52.608473 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.241892 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-091f-account-create-m48fq"] Nov 21 14:02:53 crc kubenswrapper[5133]: E1121 14:02:53.242938 5133 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="20185974-caab-4948-ae2b-159ad95e3f21" containerName="init" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.242958 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="20185974-caab-4948-ae2b-159ad95e3f21" containerName="init" Nov 21 14:02:53 crc kubenswrapper[5133]: E1121 14:02:53.243070 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20185974-caab-4948-ae2b-159ad95e3f21" containerName="dnsmasq-dns" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.243080 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="20185974-caab-4948-ae2b-159ad95e3f21" containerName="dnsmasq-dns" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.243331 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="20185974-caab-4948-ae2b-159ad95e3f21" containerName="dnsmasq-dns" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.244148 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.247351 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.249917 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-jb7qk"] Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.251208 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.259846 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-091f-account-create-m48fq"] Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.272599 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-jb7qk"] Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.311392 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.311456 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.360862 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-operator-scripts\") pod \"keystone-091f-account-create-m48fq\" (UID: \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\") " pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.360935 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbfk9\" (UniqueName: \"kubernetes.io/projected/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-kube-api-access-kbfk9\") pod \"keystone-091f-account-create-m48fq\" (UID: \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\") " pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.361642 5133 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edcec591-6e27-4287-abdf-71dd047698a9-operator-scripts\") pod \"keystone-db-create-jb7qk\" (UID: \"edcec591-6e27-4287-abdf-71dd047698a9\") " pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.361716 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsgzd\" (UniqueName: \"kubernetes.io/projected/edcec591-6e27-4287-abdf-71dd047698a9-kube-api-access-vsgzd\") pod \"keystone-db-create-jb7qk\" (UID: \"edcec591-6e27-4287-abdf-71dd047698a9\") " pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.463576 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsgzd\" (UniqueName: \"kubernetes.io/projected/edcec591-6e27-4287-abdf-71dd047698a9-kube-api-access-vsgzd\") pod \"keystone-db-create-jb7qk\" (UID: \"edcec591-6e27-4287-abdf-71dd047698a9\") " pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.463712 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-operator-scripts\") pod \"keystone-091f-account-create-m48fq\" (UID: \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\") " pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.463742 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbfk9\" (UniqueName: \"kubernetes.io/projected/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-kube-api-access-kbfk9\") pod \"keystone-091f-account-create-m48fq\" (UID: \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\") " pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.463817 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edcec591-6e27-4287-abdf-71dd047698a9-operator-scripts\") pod \"keystone-db-create-jb7qk\" (UID: \"edcec591-6e27-4287-abdf-71dd047698a9\") " pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.464884 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edcec591-6e27-4287-abdf-71dd047698a9-operator-scripts\") pod \"keystone-db-create-jb7qk\" (UID: \"edcec591-6e27-4287-abdf-71dd047698a9\") " pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.464892 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-operator-scripts\") pod \"keystone-091f-account-create-m48fq\" (UID: \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\") " pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.478118 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-9z6cc"] Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.479586 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.487224 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbfk9\" (UniqueName: \"kubernetes.io/projected/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-kube-api-access-kbfk9\") pod \"keystone-091f-account-create-m48fq\" (UID: \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\") " pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.487560 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsgzd\" (UniqueName: \"kubernetes.io/projected/edcec591-6e27-4287-abdf-71dd047698a9-kube-api-access-vsgzd\") pod \"keystone-db-create-jb7qk\" (UID: \"edcec591-6e27-4287-abdf-71dd047698a9\") " pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.491043 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-9z6cc"] Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.561483 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.587676 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.600750 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-01ea-account-create-5bjqm"] Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.602043 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.607459 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.621974 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-01ea-account-create-5bjqm"] Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.668823 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1633232-e2fd-4172-b929-22af732c2a8f-operator-scripts\") pod \"placement-db-create-9z6cc\" (UID: \"b1633232-e2fd-4172-b929-22af732c2a8f\") " pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.668870 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tqj6\" (UniqueName: \"kubernetes.io/projected/b1633232-e2fd-4172-b929-22af732c2a8f-kube-api-access-6tqj6\") pod \"placement-db-create-9z6cc\" (UID: \"b1633232-e2fd-4172-b929-22af732c2a8f\") " pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.771124 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1633232-e2fd-4172-b929-22af732c2a8f-operator-scripts\") pod \"placement-db-create-9z6cc\" (UID: \"b1633232-e2fd-4172-b929-22af732c2a8f\") " pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.771647 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tqj6\" (UniqueName: 
\"kubernetes.io/projected/b1633232-e2fd-4172-b929-22af732c2a8f-kube-api-access-6tqj6\") pod \"placement-db-create-9z6cc\" (UID: \"b1633232-e2fd-4172-b929-22af732c2a8f\") " pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.771700 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nxhw\" (UniqueName: \"kubernetes.io/projected/b48cb16b-2e16-4f3e-a083-e421d2ad2930-kube-api-access-2nxhw\") pod \"placement-01ea-account-create-5bjqm\" (UID: \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\") " pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.771758 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48cb16b-2e16-4f3e-a083-e421d2ad2930-operator-scripts\") pod \"placement-01ea-account-create-5bjqm\" (UID: \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\") " pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.773049 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1633232-e2fd-4172-b929-22af732c2a8f-operator-scripts\") pod \"placement-db-create-9z6cc\" (UID: \"b1633232-e2fd-4172-b929-22af732c2a8f\") " pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.801895 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tqj6\" (UniqueName: \"kubernetes.io/projected/b1633232-e2fd-4172-b929-22af732c2a8f-kube-api-access-6tqj6\") pod \"placement-db-create-9z6cc\" (UID: \"b1633232-e2fd-4172-b929-22af732c2a8f\") " pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.871860 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.875085 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nxhw\" (UniqueName: \"kubernetes.io/projected/b48cb16b-2e16-4f3e-a083-e421d2ad2930-kube-api-access-2nxhw\") pod \"placement-01ea-account-create-5bjqm\" (UID: \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\") " pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.875386 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48cb16b-2e16-4f3e-a083-e421d2ad2930-operator-scripts\") pod \"placement-01ea-account-create-5bjqm\" (UID: \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\") " pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.876351 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48cb16b-2e16-4f3e-a083-e421d2ad2930-operator-scripts\") pod \"placement-01ea-account-create-5bjqm\" (UID: \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\") " pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.890409 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-091f-account-create-m48fq"] Nov 21 14:02:53 crc kubenswrapper[5133]: I1121 14:02:53.893470 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nxhw\" (UniqueName: \"kubernetes.io/projected/b48cb16b-2e16-4f3e-a083-e421d2ad2930-kube-api-access-2nxhw\") pod \"placement-01ea-account-create-5bjqm\" (UID: \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\") " pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:53 crc kubenswrapper[5133]: W1121 14:02:53.943168 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbaacbad7_4746_4ce7_92a6_c820bdd0a2ac.slice/crio-0179c36dd28918d75d086d66ae6c9942301997b822e41de52bfeb97a262891d1 WatchSource:0}: Error finding container 0179c36dd28918d75d086d66ae6c9942301997b822e41de52bfeb97a262891d1: Status 404 returned error can't find the container with id 0179c36dd28918d75d086d66ae6c9942301997b822e41de52bfeb97a262891d1 Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.005859 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-jb7qk"] Nov 21 14:02:54 crc kubenswrapper[5133]: W1121 14:02:54.009129 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podedcec591_6e27_4287_abdf_71dd047698a9.slice/crio-39e481bfc5a817794e43f2f6234ff09bf225e1115ce81c0a0303fc40287c9c62 WatchSource:0}: Error finding container 39e481bfc5a817794e43f2f6234ff09bf225e1115ce81c0a0303fc40287c9c62: Status 404 returned error can't find the container with id 39e481bfc5a817794e43f2f6234ff09bf225e1115ce81c0a0303fc40287c9c62 Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.033291 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.377056 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-9z6cc"] Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.476437 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20185974-caab-4948-ae2b-159ad95e3f21" path="/var/lib/kubelet/pods/20185974-caab-4948-ae2b-159ad95e3f21/volumes" Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.481262 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-01ea-account-create-5bjqm"] Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.521416 5133 generic.go:334] "Generic (PLEG): container finished" podID="baacbad7-4746-4ce7-92a6-c820bdd0a2ac" containerID="51474f06ea677383540766f25941905209f4a26d9fc93361b532f2473b8e534b" exitCode=0 Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.521522 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-091f-account-create-m48fq" event={"ID":"baacbad7-4746-4ce7-92a6-c820bdd0a2ac","Type":"ContainerDied","Data":"51474f06ea677383540766f25941905209f4a26d9fc93361b532f2473b8e534b"} Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.521568 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-091f-account-create-m48fq" event={"ID":"baacbad7-4746-4ce7-92a6-c820bdd0a2ac","Type":"ContainerStarted","Data":"0179c36dd28918d75d086d66ae6c9942301997b822e41de52bfeb97a262891d1"} Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.523871 5133 generic.go:334] "Generic (PLEG): container finished" podID="edcec591-6e27-4287-abdf-71dd047698a9" containerID="4c54d1c5b88d2133b7f2fe3ad66be6d7a5f34ff7941f062ce9351b4dd06e640b" exitCode=0 Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.523921 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jb7qk" event={"ID":"edcec591-6e27-4287-abdf-71dd047698a9","Type":"ContainerDied","Data":"4c54d1c5b88d2133b7f2fe3ad66be6d7a5f34ff7941f062ce9351b4dd06e640b"} Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.523986 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jb7qk" event={"ID":"edcec591-6e27-4287-abdf-71dd047698a9","Type":"ContainerStarted","Data":"39e481bfc5a817794e43f2f6234ff09bf225e1115ce81c0a0303fc40287c9c62"} Nov 21 14:02:54 crc kubenswrapper[5133]: I1121 14:02:54.528174 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-9z6cc" event={"ID":"b1633232-e2fd-4172-b929-22af732c2a8f","Type":"ContainerStarted","Data":"f67b95dea06e97ccba6cbabe60b7f8cebe74e2696b26b2a2030f4a77181eb2f5"} Nov 21 14:02:54 crc kubenswrapper[5133]: W1121 14:02:54.598955 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb48cb16b_2e16_4f3e_a083_e421d2ad2930.slice/crio-4d56ab9bde07314ee54d6ad0c9887b4af8cedd27bd38e6c770ab1db594c8bc0d WatchSource:0}: Error finding container 4d56ab9bde07314ee54d6ad0c9887b4af8cedd27bd38e6c770ab1db594c8bc0d: Status 404 returned error can't find the container with id 4d56ab9bde07314ee54d6ad0c9887b4af8cedd27bd38e6c770ab1db594c8bc0d Nov 21 14:02:55 crc kubenswrapper[5133]: I1121 14:02:55.544211 5133 generic.go:334] "Generic (PLEG): container finished" podID="b48cb16b-2e16-4f3e-a083-e421d2ad2930" containerID="da5ed15d2c49c142e0474a16aa74fa58366bf518a297597d2b94ef2b43e9a159" exitCode=0 Nov 21 14:02:55 
crc kubenswrapper[5133]: I1121 14:02:55.544322 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-01ea-account-create-5bjqm" event={"ID":"b48cb16b-2e16-4f3e-a083-e421d2ad2930","Type":"ContainerDied","Data":"da5ed15d2c49c142e0474a16aa74fa58366bf518a297597d2b94ef2b43e9a159"} Nov 21 14:02:55 crc kubenswrapper[5133]: I1121 14:02:55.544825 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-01ea-account-create-5bjqm" event={"ID":"b48cb16b-2e16-4f3e-a083-e421d2ad2930","Type":"ContainerStarted","Data":"4d56ab9bde07314ee54d6ad0c9887b4af8cedd27bd38e6c770ab1db594c8bc0d"} Nov 21 14:02:55 crc kubenswrapper[5133]: I1121 14:02:55.548820 5133 generic.go:334] "Generic (PLEG): container finished" podID="b1633232-e2fd-4172-b929-22af732c2a8f" containerID="b65f98abb1a499f4f403b4c63640a574e05086e10dcc6a467bc4331c9e681552" exitCode=0 Nov 21 14:02:55 crc kubenswrapper[5133]: I1121 14:02:55.548924 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-9z6cc" event={"ID":"b1633232-e2fd-4172-b929-22af732c2a8f","Type":"ContainerDied","Data":"b65f98abb1a499f4f403b4c63640a574e05086e10dcc6a467bc4331c9e681552"} Nov 21 14:02:55 crc kubenswrapper[5133]: I1121 14:02:55.958375 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:55 crc kubenswrapper[5133]: I1121 14:02:55.967115 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.121141 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-operator-scripts\") pod \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\" (UID: \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\") " Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.121538 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsgzd\" (UniqueName: \"kubernetes.io/projected/edcec591-6e27-4287-abdf-71dd047698a9-kube-api-access-vsgzd\") pod \"edcec591-6e27-4287-abdf-71dd047698a9\" (UID: \"edcec591-6e27-4287-abdf-71dd047698a9\") " Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.121792 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edcec591-6e27-4287-abdf-71dd047698a9-operator-scripts\") pod \"edcec591-6e27-4287-abdf-71dd047698a9\" (UID: \"edcec591-6e27-4287-abdf-71dd047698a9\") " Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.121884 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kbfk9\" (UniqueName: \"kubernetes.io/projected/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-kube-api-access-kbfk9\") pod \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\" (UID: \"baacbad7-4746-4ce7-92a6-c820bdd0a2ac\") " Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.121910 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "baacbad7-4746-4ce7-92a6-c820bdd0a2ac" (UID: "baacbad7-4746-4ce7-92a6-c820bdd0a2ac"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.122327 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edcec591-6e27-4287-abdf-71dd047698a9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "edcec591-6e27-4287-abdf-71dd047698a9" (UID: "edcec591-6e27-4287-abdf-71dd047698a9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.123124 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/edcec591-6e27-4287-abdf-71dd047698a9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.123179 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.129199 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edcec591-6e27-4287-abdf-71dd047698a9-kube-api-access-vsgzd" (OuterVolumeSpecName: "kube-api-access-vsgzd") pod "edcec591-6e27-4287-abdf-71dd047698a9" (UID: "edcec591-6e27-4287-abdf-71dd047698a9"). InnerVolumeSpecName "kube-api-access-vsgzd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.129237 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-kube-api-access-kbfk9" (OuterVolumeSpecName: "kube-api-access-kbfk9") pod "baacbad7-4746-4ce7-92a6-c820bdd0a2ac" (UID: "baacbad7-4746-4ce7-92a6-c820bdd0a2ac"). InnerVolumeSpecName "kube-api-access-kbfk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.225639 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsgzd\" (UniqueName: \"kubernetes.io/projected/edcec591-6e27-4287-abdf-71dd047698a9-kube-api-access-vsgzd\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.225699 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kbfk9\" (UniqueName: \"kubernetes.io/projected/baacbad7-4746-4ce7-92a6-c820bdd0a2ac-kube-api-access-kbfk9\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.240100 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.560563 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jb7qk" event={"ID":"edcec591-6e27-4287-abdf-71dd047698a9","Type":"ContainerDied","Data":"39e481bfc5a817794e43f2f6234ff09bf225e1115ce81c0a0303fc40287c9c62"} Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.560629 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39e481bfc5a817794e43f2f6234ff09bf225e1115ce81c0a0303fc40287c9c62" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.560598 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-jb7qk" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.562777 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-091f-account-create-m48fq" event={"ID":"baacbad7-4746-4ce7-92a6-c820bdd0a2ac","Type":"ContainerDied","Data":"0179c36dd28918d75d086d66ae6c9942301997b822e41de52bfeb97a262891d1"} Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.562830 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0179c36dd28918d75d086d66ae6c9942301997b822e41de52bfeb97a262891d1" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.562925 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-091f-account-create-m48fq" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.969536 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:56 crc kubenswrapper[5133]: I1121 14:02:56.973570 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.144173 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tqj6\" (UniqueName: \"kubernetes.io/projected/b1633232-e2fd-4172-b929-22af732c2a8f-kube-api-access-6tqj6\") pod \"b1633232-e2fd-4172-b929-22af732c2a8f\" (UID: \"b1633232-e2fd-4172-b929-22af732c2a8f\") " Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.144354 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1633232-e2fd-4172-b929-22af732c2a8f-operator-scripts\") pod \"b1633232-e2fd-4172-b929-22af732c2a8f\" (UID: \"b1633232-e2fd-4172-b929-22af732c2a8f\") " Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.144402 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48cb16b-2e16-4f3e-a083-e421d2ad2930-operator-scripts\") pod \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\" (UID: \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\") " Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.144478 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nxhw\" (UniqueName: \"kubernetes.io/projected/b48cb16b-2e16-4f3e-a083-e421d2ad2930-kube-api-access-2nxhw\") pod \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\" (UID: \"b48cb16b-2e16-4f3e-a083-e421d2ad2930\") " Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.145028 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1633232-e2fd-4172-b929-22af732c2a8f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b1633232-e2fd-4172-b929-22af732c2a8f" (UID: "b1633232-e2fd-4172-b929-22af732c2a8f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.145335 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b48cb16b-2e16-4f3e-a083-e421d2ad2930-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b48cb16b-2e16-4f3e-a083-e421d2ad2930" (UID: "b48cb16b-2e16-4f3e-a083-e421d2ad2930"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.148776 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b48cb16b-2e16-4f3e-a083-e421d2ad2930-kube-api-access-2nxhw" (OuterVolumeSpecName: "kube-api-access-2nxhw") pod "b48cb16b-2e16-4f3e-a083-e421d2ad2930" (UID: "b48cb16b-2e16-4f3e-a083-e421d2ad2930"). InnerVolumeSpecName "kube-api-access-2nxhw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.152319 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1633232-e2fd-4172-b929-22af732c2a8f-kube-api-access-6tqj6" (OuterVolumeSpecName: "kube-api-access-6tqj6") pod "b1633232-e2fd-4172-b929-22af732c2a8f" (UID: "b1633232-e2fd-4172-b929-22af732c2a8f"). InnerVolumeSpecName "kube-api-access-6tqj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.246311 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1633232-e2fd-4172-b929-22af732c2a8f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.246619 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48cb16b-2e16-4f3e-a083-e421d2ad2930-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.246709 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nxhw\" (UniqueName: \"kubernetes.io/projected/b48cb16b-2e16-4f3e-a083-e421d2ad2930-kube-api-access-2nxhw\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.246792 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tqj6\" (UniqueName: \"kubernetes.io/projected/b1633232-e2fd-4172-b929-22af732c2a8f-kube-api-access-6tqj6\") on node \"crc\" DevicePath \"\"" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.781709 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-01ea-account-create-5bjqm" event={"ID":"b48cb16b-2e16-4f3e-a083-e421d2ad2930","Type":"ContainerDied","Data":"4d56ab9bde07314ee54d6ad0c9887b4af8cedd27bd38e6c770ab1db594c8bc0d"} Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.781769 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d56ab9bde07314ee54d6ad0c9887b4af8cedd27bd38e6c770ab1db594c8bc0d" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.781843 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-01ea-account-create-5bjqm" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.787700 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-9z6cc" event={"ID":"b1633232-e2fd-4172-b929-22af732c2a8f","Type":"ContainerDied","Data":"f67b95dea06e97ccba6cbabe60b7f8cebe74e2696b26b2a2030f4a77181eb2f5"} Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.787768 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f67b95dea06e97ccba6cbabe60b7f8cebe74e2696b26b2a2030f4a77181eb2f5" Nov 21 14:02:57 crc kubenswrapper[5133]: I1121 14:02:57.787806 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-9z6cc" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.823259 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-lrrtw"] Nov 21 14:02:58 crc kubenswrapper[5133]: E1121 14:02:58.824186 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1633232-e2fd-4172-b929-22af732c2a8f" containerName="mariadb-database-create" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.824203 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1633232-e2fd-4172-b929-22af732c2a8f" containerName="mariadb-database-create" Nov 21 14:02:58 crc kubenswrapper[5133]: E1121 14:02:58.824219 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baacbad7-4746-4ce7-92a6-c820bdd0a2ac" containerName="mariadb-account-create" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.824225 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="baacbad7-4746-4ce7-92a6-c820bdd0a2ac" containerName="mariadb-account-create" Nov 21 14:02:58 crc kubenswrapper[5133]: E1121 14:02:58.824253 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b48cb16b-2e16-4f3e-a083-e421d2ad2930" containerName="mariadb-account-create" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.824264 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b48cb16b-2e16-4f3e-a083-e421d2ad2930" containerName="mariadb-account-create" Nov 21 14:02:58 crc kubenswrapper[5133]: E1121 14:02:58.824289 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edcec591-6e27-4287-abdf-71dd047698a9" containerName="mariadb-database-create" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.824297 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="edcec591-6e27-4287-abdf-71dd047698a9" containerName="mariadb-database-create" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.824487 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b48cb16b-2e16-4f3e-a083-e421d2ad2930" containerName="mariadb-account-create" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.824519 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1633232-e2fd-4172-b929-22af732c2a8f" containerName="mariadb-database-create" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.824535 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="baacbad7-4746-4ce7-92a6-c820bdd0a2ac" containerName="mariadb-account-create" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.824545 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="edcec591-6e27-4287-abdf-71dd047698a9" containerName="mariadb-database-create" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.825239 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-lrrtw" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.839446 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-lrrtw"] Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.896437 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-operator-scripts\") pod \"glance-db-create-lrrtw\" (UID: \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\") " pod="openstack/glance-db-create-lrrtw" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.896580 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4cgg\" (UniqueName: \"kubernetes.io/projected/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-kube-api-access-b4cgg\") pod \"glance-db-create-lrrtw\" (UID: \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\") " pod="openstack/glance-db-create-lrrtw" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.974620 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-52d3-account-create-7b77h"] Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.976662 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.980054 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.985083 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-52d3-account-create-7b77h"] Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.998184 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svp9d\" (UniqueName: \"kubernetes.io/projected/0c98f849-edf5-4072-a707-fa87dc3f3b61-kube-api-access-svp9d\") pod \"glance-52d3-account-create-7b77h\" (UID: \"0c98f849-edf5-4072-a707-fa87dc3f3b61\") " pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.998241 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c98f849-edf5-4072-a707-fa87dc3f3b61-operator-scripts\") pod \"glance-52d3-account-create-7b77h\" (UID: \"0c98f849-edf5-4072-a707-fa87dc3f3b61\") " pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.998295 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-operator-scripts\") pod \"glance-db-create-lrrtw\" (UID: \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\") " pod="openstack/glance-db-create-lrrtw" Nov 21 14:02:58 crc kubenswrapper[5133]: I1121 14:02:58.998730 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4cgg\" (UniqueName: \"kubernetes.io/projected/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-kube-api-access-b4cgg\") pod \"glance-db-create-lrrtw\" (UID: \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\") " pod="openstack/glance-db-create-lrrtw" Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.000264 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-operator-scripts\") pod \"glance-db-create-lrrtw\" (UID: \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\") " pod="openstack/glance-db-create-lrrtw" Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.025557 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4cgg\" (UniqueName: \"kubernetes.io/projected/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-kube-api-access-b4cgg\") pod \"glance-db-create-lrrtw\" (UID: \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\") " pod="openstack/glance-db-create-lrrtw" Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.101254 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svp9d\" (UniqueName: \"kubernetes.io/projected/0c98f849-edf5-4072-a707-fa87dc3f3b61-kube-api-access-svp9d\") pod \"glance-52d3-account-create-7b77h\" (UID: \"0c98f849-edf5-4072-a707-fa87dc3f3b61\") " pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.101313 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c98f849-edf5-4072-a707-fa87dc3f3b61-operator-scripts\") pod \"glance-52d3-account-create-7b77h\" (UID: \"0c98f849-edf5-4072-a707-fa87dc3f3b61\") " pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.102466 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c98f849-edf5-4072-a707-fa87dc3f3b61-operator-scripts\") pod \"glance-52d3-account-create-7b77h\" (UID: \"0c98f849-edf5-4072-a707-fa87dc3f3b61\") " pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.120515 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svp9d\" (UniqueName: \"kubernetes.io/projected/0c98f849-edf5-4072-a707-fa87dc3f3b61-kube-api-access-svp9d\") pod \"glance-52d3-account-create-7b77h\" (UID: \"0c98f849-edf5-4072-a707-fa87dc3f3b61\") " pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.142991 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-lrrtw" Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.296847 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.825157 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-lrrtw"] Nov 21 14:02:59 crc kubenswrapper[5133]: W1121 14:02:59.829799 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podede4b5ec_0692_4ab1_857d_df00dea3d1bf.slice/crio-df40d214fb20a320cf6b7ff6482f367f0eaa3bff9c05e7c303042884ae3baac3 WatchSource:0}: Error finding container df40d214fb20a320cf6b7ff6482f367f0eaa3bff9c05e7c303042884ae3baac3: Status 404 returned error can't find the container with id df40d214fb20a320cf6b7ff6482f367f0eaa3bff9c05e7c303042884ae3baac3 Nov 21 14:02:59 crc kubenswrapper[5133]: I1121 14:02:59.954652 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-52d3-account-create-7b77h"] Nov 21 14:03:00 crc kubenswrapper[5133]: I1121 14:03:00.820239 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-lrrtw" event={"ID":"ede4b5ec-0692-4ab1-857d-df00dea3d1bf","Type":"ContainerStarted","Data":"df40d214fb20a320cf6b7ff6482f367f0eaa3bff9c05e7c303042884ae3baac3"} Nov 21 14:03:00 crc kubenswrapper[5133]: I1121 14:03:00.822257 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-52d3-account-create-7b77h" event={"ID":"0c98f849-edf5-4072-a707-fa87dc3f3b61","Type":"ContainerStarted","Data":"d02a3cb7a97385ee99782c2a86906e7520b396946ab65f2b3b42b7716588733f"} Nov 21 14:03:01 crc kubenswrapper[5133]: I1121 14:03:01.834542 5133 generic.go:334] "Generic (PLEG): container finished" podID="0c98f849-edf5-4072-a707-fa87dc3f3b61" containerID="fc48dddf1308fed80597b3816c44b9c98a5812e090a4b8acafd9ca3f4b61a18d" exitCode=0 Nov 21 14:03:01 crc kubenswrapper[5133]: I1121 14:03:01.835257 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-52d3-account-create-7b77h" event={"ID":"0c98f849-edf5-4072-a707-fa87dc3f3b61","Type":"ContainerDied","Data":"fc48dddf1308fed80597b3816c44b9c98a5812e090a4b8acafd9ca3f4b61a18d"} Nov 21 14:03:01 crc kubenswrapper[5133]: I1121 14:03:01.837493 5133 generic.go:334] "Generic (PLEG): container finished" podID="ede4b5ec-0692-4ab1-857d-df00dea3d1bf" containerID="7f3a6b02b119aedbd74ffd4430252a5eada59d520b2b664e9a776cae08326942" exitCode=0 Nov 21 14:03:01 crc kubenswrapper[5133]: I1121 14:03:01.837529 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-lrrtw" event={"ID":"ede4b5ec-0692-4ab1-857d-df00dea3d1bf","Type":"ContainerDied","Data":"7f3a6b02b119aedbd74ffd4430252a5eada59d520b2b664e9a776cae08326942"} Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.222411 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.331998 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-lrrtw" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.392844 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c98f849-edf5-4072-a707-fa87dc3f3b61-operator-scripts\") pod \"0c98f849-edf5-4072-a707-fa87dc3f3b61\" (UID: \"0c98f849-edf5-4072-a707-fa87dc3f3b61\") " Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.393556 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svp9d\" (UniqueName: \"kubernetes.io/projected/0c98f849-edf5-4072-a707-fa87dc3f3b61-kube-api-access-svp9d\") pod \"0c98f849-edf5-4072-a707-fa87dc3f3b61\" (UID: \"0c98f849-edf5-4072-a707-fa87dc3f3b61\") " Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.394817 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c98f849-edf5-4072-a707-fa87dc3f3b61-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0c98f849-edf5-4072-a707-fa87dc3f3b61" (UID: "0c98f849-edf5-4072-a707-fa87dc3f3b61"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.406123 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c98f849-edf5-4072-a707-fa87dc3f3b61-kube-api-access-svp9d" (OuterVolumeSpecName: "kube-api-access-svp9d") pod "0c98f849-edf5-4072-a707-fa87dc3f3b61" (UID: "0c98f849-edf5-4072-a707-fa87dc3f3b61"). InnerVolumeSpecName "kube-api-access-svp9d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.495340 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-operator-scripts\") pod \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\" (UID: \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\") " Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.495492 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4cgg\" (UniqueName: \"kubernetes.io/projected/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-kube-api-access-b4cgg\") pod \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\" (UID: \"ede4b5ec-0692-4ab1-857d-df00dea3d1bf\") " Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.496328 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ede4b5ec-0692-4ab1-857d-df00dea3d1bf" (UID: "ede4b5ec-0692-4ab1-857d-df00dea3d1bf"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.496806 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c98f849-edf5-4072-a707-fa87dc3f3b61-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.496840 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.496853 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svp9d\" (UniqueName: \"kubernetes.io/projected/0c98f849-edf5-4072-a707-fa87dc3f3b61-kube-api-access-svp9d\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.500337 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-kube-api-access-b4cgg" (OuterVolumeSpecName: "kube-api-access-b4cgg") pod "ede4b5ec-0692-4ab1-857d-df00dea3d1bf" (UID: "ede4b5ec-0692-4ab1-857d-df00dea3d1bf"). InnerVolumeSpecName "kube-api-access-b4cgg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.598333 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4cgg\" (UniqueName: \"kubernetes.io/projected/ede4b5ec-0692-4ab1-857d-df00dea3d1bf-kube-api-access-b4cgg\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.856406 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-52d3-account-create-7b77h" event={"ID":"0c98f849-edf5-4072-a707-fa87dc3f3b61","Type":"ContainerDied","Data":"d02a3cb7a97385ee99782c2a86906e7520b396946ab65f2b3b42b7716588733f"} Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.856450 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-52d3-account-create-7b77h" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.856556 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d02a3cb7a97385ee99782c2a86906e7520b396946ab65f2b3b42b7716588733f" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.859533 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-lrrtw" event={"ID":"ede4b5ec-0692-4ab1-857d-df00dea3d1bf","Type":"ContainerDied","Data":"df40d214fb20a320cf6b7ff6482f367f0eaa3bff9c05e7c303042884ae3baac3"} Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.859594 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df40d214fb20a320cf6b7ff6482f367f0eaa3bff9c05e7c303042884ae3baac3" Nov 21 14:03:03 crc kubenswrapper[5133]: I1121 14:03:03.859606 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-lrrtw" Nov 21 14:03:08 crc kubenswrapper[5133]: I1121 14:03:08.969130 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-2ckfn" podUID="6afb5f16-3806-4fbf-becf-8bf66576286f" containerName="ovn-controller" probeResult="failure" output=< Nov 21 14:03:08 crc kubenswrapper[5133]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 21 14:03:08 crc kubenswrapper[5133]: > Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.007720 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.052462 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hbfsj" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.064709 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-6blxq"] Nov 21 14:03:09 crc kubenswrapper[5133]: E1121 14:03:09.065243 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c98f849-edf5-4072-a707-fa87dc3f3b61" containerName="mariadb-account-create" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.065268 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c98f849-edf5-4072-a707-fa87dc3f3b61" containerName="mariadb-account-create" Nov 21 14:03:09 crc kubenswrapper[5133]: E1121 14:03:09.065330 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ede4b5ec-0692-4ab1-857d-df00dea3d1bf" containerName="mariadb-database-create" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.065339 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ede4b5ec-0692-4ab1-857d-df00dea3d1bf" containerName="mariadb-database-create" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.066883 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c98f849-edf5-4072-a707-fa87dc3f3b61" containerName="mariadb-account-create" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.066924 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ede4b5ec-0692-4ab1-857d-df00dea3d1bf" containerName="mariadb-database-create" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.071318 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.075639 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cvwrt" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.075646 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.079046 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-6blxq"] Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.206626 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-combined-ca-bundle\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.207032 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mr576\" (UniqueName: \"kubernetes.io/projected/76b580d8-fd56-40c0-a24a-1a3234d95ca6-kube-api-access-mr576\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.207292 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-config-data\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.207519 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-db-sync-config-data\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.277822 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-2ckfn-config-4w7kq"] Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.279069 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.281508 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.293841 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2ckfn-config-4w7kq"] Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309048 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-combined-ca-bundle\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309133 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-log-ovn\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309161 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-scripts\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309200 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mr576\" (UniqueName: \"kubernetes.io/projected/76b580d8-fd56-40c0-a24a-1a3234d95ca6-kube-api-access-mr576\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309232 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-config-data\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309264 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309289 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run-ovn\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309341 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-db-sync-config-data\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " 
pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309366 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-additional-scripts\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.309423 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gq9x\" (UniqueName: \"kubernetes.io/projected/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-kube-api-access-7gq9x\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.318874 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-db-sync-config-data\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.319490 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-config-data\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.343045 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mr576\" (UniqueName: \"kubernetes.io/projected/76b580d8-fd56-40c0-a24a-1a3234d95ca6-kube-api-access-mr576\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.343825 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-combined-ca-bundle\") pod \"glance-db-sync-6blxq\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.400513 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-6blxq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.411262 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.411335 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run-ovn\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.411402 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-additional-scripts\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.411456 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gq9x\" (UniqueName: \"kubernetes.io/projected/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-kube-api-access-7gq9x\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.411553 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-log-ovn\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.411574 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-scripts\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.411815 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.411840 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run-ovn\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.411933 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-log-ovn\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " 
pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.412456 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-additional-scripts\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.413556 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-scripts\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.434486 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gq9x\" (UniqueName: \"kubernetes.io/projected/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-kube-api-access-7gq9x\") pod \"ovn-controller-2ckfn-config-4w7kq\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:09 crc kubenswrapper[5133]: I1121 14:03:09.600228 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:10 crc kubenswrapper[5133]: I1121 14:03:10.008656 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-6blxq"] Nov 21 14:03:10 crc kubenswrapper[5133]: W1121 14:03:10.102232 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e4ac98c_2e0d_45a0_94c0_d683f7f7a737.slice/crio-36f2801c58ed763adc4fefa54e1592c49994f20c04af991d2ad86d77a209b48e WatchSource:0}: Error finding container 36f2801c58ed763adc4fefa54e1592c49994f20c04af991d2ad86d77a209b48e: Status 404 returned error can't find the container with id 36f2801c58ed763adc4fefa54e1592c49994f20c04af991d2ad86d77a209b48e Nov 21 14:03:10 crc kubenswrapper[5133]: I1121 14:03:10.107691 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-2ckfn-config-4w7kq"] Nov 21 14:03:10 crc kubenswrapper[5133]: I1121 14:03:10.925938 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6blxq" event={"ID":"76b580d8-fd56-40c0-a24a-1a3234d95ca6","Type":"ContainerStarted","Data":"6d9538441e981f10dc0bf28853063e4e5b3f0233709e0df3963660e88a5ce489"} Nov 21 14:03:10 crc kubenswrapper[5133]: I1121 14:03:10.940306 5133 generic.go:334] "Generic (PLEG): container finished" podID="9aa1caed-f687-4526-a851-59b4d192b705" containerID="12333d78e0e6447e201e2ddaf5c5f5c05242a23fc2120dc8a1be00a6d2499df8" exitCode=0 Nov 21 14:03:10 crc kubenswrapper[5133]: I1121 14:03:10.940378 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9aa1caed-f687-4526-a851-59b4d192b705","Type":"ContainerDied","Data":"12333d78e0e6447e201e2ddaf5c5f5c05242a23fc2120dc8a1be00a6d2499df8"} Nov 21 14:03:10 crc kubenswrapper[5133]: I1121 14:03:10.947818 5133 generic.go:334] "Generic (PLEG): container finished" podID="7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" containerID="1dbbb3ff6d6a17bec5483a3a76d5b864f4fd3de1c63597309c4c89c6ebee9b7d" exitCode=0 Nov 21 14:03:10 crc kubenswrapper[5133]: I1121 14:03:10.947859 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-2ckfn-config-4w7kq" event={"ID":"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737","Type":"ContainerDied","Data":"1dbbb3ff6d6a17bec5483a3a76d5b864f4fd3de1c63597309c4c89c6ebee9b7d"} Nov 21 14:03:10 crc kubenswrapper[5133]: I1121 14:03:10.947881 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2ckfn-config-4w7kq" event={"ID":"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737","Type":"ContainerStarted","Data":"36f2801c58ed763adc4fefa54e1592c49994f20c04af991d2ad86d77a209b48e"} Nov 21 14:03:11 crc kubenswrapper[5133]: I1121 14:03:11.958430 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9aa1caed-f687-4526-a851-59b4d192b705","Type":"ContainerStarted","Data":"bd3fe1e8202ee70b5ddcd1ed9c071a0cb93f6beae9a86622fc0017a4dd4a1716"} Nov 21 14:03:11 crc kubenswrapper[5133]: I1121 14:03:11.959257 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 21 14:03:11 crc kubenswrapper[5133]: I1121 14:03:11.961068 5133 generic.go:334] "Generic (PLEG): container finished" podID="2c9873f2-025e-499f-8a76-47c38495fd75" containerID="7743d3ae5f3d1730d9ae8514c383685ab59cb65808a01679f0e73dc16f30e218" exitCode=0 Nov 21 14:03:11 crc kubenswrapper[5133]: I1121 14:03:11.961219 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2c9873f2-025e-499f-8a76-47c38495fd75","Type":"ContainerDied","Data":"7743d3ae5f3d1730d9ae8514c383685ab59cb65808a01679f0e73dc16f30e218"} Nov 21 14:03:11 crc kubenswrapper[5133]: I1121 14:03:11.993900 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.508557239 podStartE2EDuration="1m13.993876217s" podCreationTimestamp="2025-11-21 14:01:58 +0000 UTC" firstStartedPulling="2025-11-21 14:02:00.919646268 +0000 UTC m=+1180.717478516" lastFinishedPulling="2025-11-21 14:02:37.404965206 +0000 UTC m=+1217.202797494" observedRunningTime="2025-11-21 14:03:11.990437205 +0000 UTC m=+1251.788269453" watchObservedRunningTime="2025-11-21 14:03:11.993876217 +0000 UTC m=+1251.791708465" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.314059 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.481713 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run\") pod \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.481802 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-scripts\") pod \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.481820 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run-ovn\") pod \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.481863 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7gq9x\" (UniqueName: \"kubernetes.io/projected/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-kube-api-access-7gq9x\") pod \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.481916 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run" (OuterVolumeSpecName: "var-run") pod "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" (UID: "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.481947 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" (UID: "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.482081 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-additional-scripts\") pod \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.482181 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-log-ovn\") pod \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\" (UID: \"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737\") " Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.482604 5133 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.482621 5133 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.482677 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" (UID: "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.483428 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" (UID: "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.484936 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-scripts" (OuterVolumeSpecName: "scripts") pod "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" (UID: "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.487944 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-kube-api-access-7gq9x" (OuterVolumeSpecName: "kube-api-access-7gq9x") pod "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" (UID: "7e4ac98c-2e0d-45a0-94c0-d683f7f7a737"). InnerVolumeSpecName "kube-api-access-7gq9x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.584189 5133 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.584233 5133 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.584243 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.584253 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7gq9x\" (UniqueName: \"kubernetes.io/projected/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737-kube-api-access-7gq9x\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.980369 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-2ckfn-config-4w7kq" event={"ID":"7e4ac98c-2e0d-45a0-94c0-d683f7f7a737","Type":"ContainerDied","Data":"36f2801c58ed763adc4fefa54e1592c49994f20c04af991d2ad86d77a209b48e"} Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.980435 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36f2801c58ed763adc4fefa54e1592c49994f20c04af991d2ad86d77a209b48e" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.980391 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-2ckfn-config-4w7kq" Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.983162 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2c9873f2-025e-499f-8a76-47c38495fd75","Type":"ContainerStarted","Data":"e453506f9e3a42b8e238b5fe885a82325983fce8b3d35ff909c123419ee46015"} Nov 21 14:03:12 crc kubenswrapper[5133]: I1121 14:03:12.983584 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:03:13 crc kubenswrapper[5133]: I1121 14:03:13.025538 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.776355151 podStartE2EDuration="1m14.025509823s" podCreationTimestamp="2025-11-21 14:01:59 +0000 UTC" firstStartedPulling="2025-11-21 14:02:01.206793788 +0000 UTC m=+1181.004626026" lastFinishedPulling="2025-11-21 14:02:37.45594845 +0000 UTC m=+1217.253780698" observedRunningTime="2025-11-21 14:03:13.016648446 +0000 UTC m=+1252.814480694" watchObservedRunningTime="2025-11-21 14:03:13.025509823 +0000 UTC m=+1252.823342091" Nov 21 14:03:13 crc kubenswrapper[5133]: I1121 14:03:13.447331 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-2ckfn-config-4w7kq"] Nov 21 14:03:13 crc kubenswrapper[5133]: I1121 14:03:13.460082 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-2ckfn-config-4w7kq"] Nov 21 14:03:13 crc kubenswrapper[5133]: I1121 14:03:13.972706 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-2ckfn" Nov 21 14:03:14 crc kubenswrapper[5133]: I1121 14:03:14.498984 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" path="/var/lib/kubelet/pods/7e4ac98c-2e0d-45a0-94c0-d683f7f7a737/volumes" Nov 21 14:03:23 crc kubenswrapper[5133]: I1121 14:03:23.310705 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:03:23 crc kubenswrapper[5133]: I1121 14:03:23.311613 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:03:23 crc kubenswrapper[5133]: I1121 14:03:23.311696 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:03:23 crc kubenswrapper[5133]: I1121 14:03:23.312415 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5883b2ffbdc225f8fc9c34f308aadba5798cb012e313b8c25cad57a148a69dbc"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:03:23 crc kubenswrapper[5133]: I1121 14:03:23.312482 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" 
podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://5883b2ffbdc225f8fc9c34f308aadba5798cb012e313b8c25cad57a148a69dbc" gracePeriod=600 Nov 21 14:03:24 crc kubenswrapper[5133]: I1121 14:03:24.092299 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="5883b2ffbdc225f8fc9c34f308aadba5798cb012e313b8c25cad57a148a69dbc" exitCode=0 Nov 21 14:03:24 crc kubenswrapper[5133]: I1121 14:03:24.092384 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"5883b2ffbdc225f8fc9c34f308aadba5798cb012e313b8c25cad57a148a69dbc"} Nov 21 14:03:24 crc kubenswrapper[5133]: I1121 14:03:24.092794 5133 scope.go:117] "RemoveContainer" containerID="aeb58eef34dec1c617396ba99663df9426cb6865e9e12764d8c5ffc72116a175" Nov 21 14:03:26 crc kubenswrapper[5133]: E1121 14:03:26.706982 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Nov 21 14:03:26 crc kubenswrapper[5133]: E1121 14:03:26.707648 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mr576,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-6blxq_openstack(76b580d8-fd56-40c0-a24a-1a3234d95ca6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 
14:03:26 crc kubenswrapper[5133]: E1121 14:03:26.708922 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-6blxq" podUID="76b580d8-fd56-40c0-a24a-1a3234d95ca6" Nov 21 14:03:27 crc kubenswrapper[5133]: I1121 14:03:27.133281 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"edc5f6668b324bafe1f33484d097fa3472daf7be070419d53434fd44d4c45cd9"} Nov 21 14:03:27 crc kubenswrapper[5133]: E1121 14:03:27.134410 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-6blxq" podUID="76b580d8-fd56-40c0-a24a-1a3234d95ca6" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.175305 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.544248 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.656232 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-7vjhl"] Nov 21 14:03:30 crc kubenswrapper[5133]: E1121 14:03:30.656661 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" containerName="ovn-config" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.656679 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" containerName="ovn-config" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.656849 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e4ac98c-2e0d-45a0-94c0-d683f7f7a737" containerName="ovn-config" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.657502 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.744370 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-7vjhl"] Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.758058 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-f69d-account-create-6ct9x"] Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.759169 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.762377 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.769128 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-fj9b9"] Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.770227 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.775056 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrt6d\" (UniqueName: \"kubernetes.io/projected/d32a6944-2567-4e79-9fee-d0aecb16f40e-kube-api-access-vrt6d\") pod \"cinder-db-create-7vjhl\" (UID: \"d32a6944-2567-4e79-9fee-d0aecb16f40e\") " pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.775127 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xfd5\" (UniqueName: \"kubernetes.io/projected/ab41e086-5fe6-40db-9293-7f8e01f43b08-kube-api-access-5xfd5\") pod \"barbican-f69d-account-create-6ct9x\" (UID: \"ab41e086-5fe6-40db-9293-7f8e01f43b08\") " pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.775212 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d32a6944-2567-4e79-9fee-d0aecb16f40e-operator-scripts\") pod \"cinder-db-create-7vjhl\" (UID: \"d32a6944-2567-4e79-9fee-d0aecb16f40e\") " pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.775259 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab41e086-5fe6-40db-9293-7f8e01f43b08-operator-scripts\") pod \"barbican-f69d-account-create-6ct9x\" (UID: \"ab41e086-5fe6-40db-9293-7f8e01f43b08\") " pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.790682 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-fj9b9"] Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.845647 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f69d-account-create-6ct9x"] Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.871895 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-18e5-account-create-rmg6q"] Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.873258 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.876223 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.877120 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrt6d\" (UniqueName: \"kubernetes.io/projected/d32a6944-2567-4e79-9fee-d0aecb16f40e-kube-api-access-vrt6d\") pod \"cinder-db-create-7vjhl\" (UID: \"d32a6944-2567-4e79-9fee-d0aecb16f40e\") " pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.877170 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b30a4a-3bf5-4a77-9422-01336a998d47-operator-scripts\") pod \"barbican-db-create-fj9b9\" (UID: \"43b30a4a-3bf5-4a77-9422-01336a998d47\") " pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.877229 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xfd5\" (UniqueName: \"kubernetes.io/projected/ab41e086-5fe6-40db-9293-7f8e01f43b08-kube-api-access-5xfd5\") pod \"barbican-f69d-account-create-6ct9x\" (UID: \"ab41e086-5fe6-40db-9293-7f8e01f43b08\") " pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.877284 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d32a6944-2567-4e79-9fee-d0aecb16f40e-operator-scripts\") pod \"cinder-db-create-7vjhl\" (UID: \"d32a6944-2567-4e79-9fee-d0aecb16f40e\") " pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.877340 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab41e086-5fe6-40db-9293-7f8e01f43b08-operator-scripts\") pod \"barbican-f69d-account-create-6ct9x\" (UID: \"ab41e086-5fe6-40db-9293-7f8e01f43b08\") " pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.877400 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5d7r\" (UniqueName: \"kubernetes.io/projected/43b30a4a-3bf5-4a77-9422-01336a998d47-kube-api-access-q5d7r\") pod \"barbican-db-create-fj9b9\" (UID: \"43b30a4a-3bf5-4a77-9422-01336a998d47\") " pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.878338 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab41e086-5fe6-40db-9293-7f8e01f43b08-operator-scripts\") pod \"barbican-f69d-account-create-6ct9x\" (UID: \"ab41e086-5fe6-40db-9293-7f8e01f43b08\") " pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.878394 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d32a6944-2567-4e79-9fee-d0aecb16f40e-operator-scripts\") pod \"cinder-db-create-7vjhl\" (UID: \"d32a6944-2567-4e79-9fee-d0aecb16f40e\") " pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.901321 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/cinder-18e5-account-create-rmg6q"] Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.931672 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrt6d\" (UniqueName: \"kubernetes.io/projected/d32a6944-2567-4e79-9fee-d0aecb16f40e-kube-api-access-vrt6d\") pod \"cinder-db-create-7vjhl\" (UID: \"d32a6944-2567-4e79-9fee-d0aecb16f40e\") " pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.940639 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xfd5\" (UniqueName: \"kubernetes.io/projected/ab41e086-5fe6-40db-9293-7f8e01f43b08-kube-api-access-5xfd5\") pod \"barbican-f69d-account-create-6ct9x\" (UID: \"ab41e086-5fe6-40db-9293-7f8e01f43b08\") " pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.977502 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.986820 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b30a4a-3bf5-4a77-9422-01336a998d47-operator-scripts\") pod \"barbican-db-create-fj9b9\" (UID: \"43b30a4a-3bf5-4a77-9422-01336a998d47\") " pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:30 crc kubenswrapper[5133]: I1121 14:03:30.995084 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5d7r\" (UniqueName: \"kubernetes.io/projected/43b30a4a-3bf5-4a77-9422-01336a998d47-kube-api-access-q5d7r\") pod \"barbican-db-create-fj9b9\" (UID: \"43b30a4a-3bf5-4a77-9422-01336a998d47\") " pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:30.998160 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b30a4a-3bf5-4a77-9422-01336a998d47-operator-scripts\") pod \"barbican-db-create-fj9b9\" (UID: \"43b30a4a-3bf5-4a77-9422-01336a998d47\") " pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.060569 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5d7r\" (UniqueName: \"kubernetes.io/projected/43b30a4a-3bf5-4a77-9422-01336a998d47-kube-api-access-q5d7r\") pod \"barbican-db-create-fj9b9\" (UID: \"43b30a4a-3bf5-4a77-9422-01336a998d47\") " pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.078258 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-ghlcg"] Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.089577 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.090082 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.091382 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.099297 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d89170b-8c95-42fa-90bd-35edc597fc2f-operator-scripts\") pod \"cinder-18e5-account-create-rmg6q\" (UID: \"2d89170b-8c95-42fa-90bd-35edc597fc2f\") " pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.099451 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2wn2\" (UniqueName: \"kubernetes.io/projected/2d89170b-8c95-42fa-90bd-35edc597fc2f-kube-api-access-k2wn2\") pod \"cinder-18e5-account-create-rmg6q\" (UID: \"2d89170b-8c95-42fa-90bd-35edc597fc2f\") " pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.113309 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-ghlcg"] Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.165133 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-tshmb"] Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.166680 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.173304 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.173617 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bv5lp" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.173717 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.173885 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.188461 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tshmb"] Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.214033 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d89170b-8c95-42fa-90bd-35edc597fc2f-operator-scripts\") pod \"cinder-18e5-account-create-rmg6q\" (UID: \"2d89170b-8c95-42fa-90bd-35edc597fc2f\") " pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.214402 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-operator-scripts\") pod \"neutron-db-create-ghlcg\" (UID: \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\") " pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.216072 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47sr2\" (UniqueName: \"kubernetes.io/projected/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-kube-api-access-47sr2\") pod \"neutron-db-create-ghlcg\" (UID: \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\") " pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.216171 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-k2wn2\" (UniqueName: \"kubernetes.io/projected/2d89170b-8c95-42fa-90bd-35edc597fc2f-kube-api-access-k2wn2\") pod \"cinder-18e5-account-create-rmg6q\" (UID: \"2d89170b-8c95-42fa-90bd-35edc597fc2f\") " pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.219969 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d89170b-8c95-42fa-90bd-35edc597fc2f-operator-scripts\") pod \"cinder-18e5-account-create-rmg6q\" (UID: \"2d89170b-8c95-42fa-90bd-35edc597fc2f\") " pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.240669 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2wn2\" (UniqueName: \"kubernetes.io/projected/2d89170b-8c95-42fa-90bd-35edc597fc2f-kube-api-access-k2wn2\") pod \"cinder-18e5-account-create-rmg6q\" (UID: \"2d89170b-8c95-42fa-90bd-35edc597fc2f\") " pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.277851 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6888-account-create-sk6x7"] Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.279799 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.289391 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.291629 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6888-account-create-sk6x7"] Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.318910 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-config-data\") pod \"keystone-db-sync-tshmb\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.318985 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-combined-ca-bundle\") pod \"keystone-db-sync-tshmb\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.320607 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kw9rp\" (UniqueName: \"kubernetes.io/projected/12e06aee-ac34-4d50-aa07-42f326d55fb2-kube-api-access-kw9rp\") pod \"keystone-db-sync-tshmb\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.320694 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-operator-scripts\") pod \"neutron-db-create-ghlcg\" (UID: \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\") " pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.320724 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47sr2\" 
(UniqueName: \"kubernetes.io/projected/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-kube-api-access-47sr2\") pod \"neutron-db-create-ghlcg\" (UID: \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\") " pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.322181 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-operator-scripts\") pod \"neutron-db-create-ghlcg\" (UID: \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\") " pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.342390 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47sr2\" (UniqueName: \"kubernetes.io/projected/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-kube-api-access-47sr2\") pod \"neutron-db-create-ghlcg\" (UID: \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\") " pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.422630 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-config-data\") pod \"keystone-db-sync-tshmb\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.422694 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpr7r\" (UniqueName: \"kubernetes.io/projected/a35d3d79-1e17-4750-9246-57363851ddd0-kube-api-access-fpr7r\") pod \"neutron-6888-account-create-sk6x7\" (UID: \"a35d3d79-1e17-4750-9246-57363851ddd0\") " pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.422722 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a35d3d79-1e17-4750-9246-57363851ddd0-operator-scripts\") pod \"neutron-6888-account-create-sk6x7\" (UID: \"a35d3d79-1e17-4750-9246-57363851ddd0\") " pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.422749 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-combined-ca-bundle\") pod \"keystone-db-sync-tshmb\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.422790 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kw9rp\" (UniqueName: \"kubernetes.io/projected/12e06aee-ac34-4d50-aa07-42f326d55fb2-kube-api-access-kw9rp\") pod \"keystone-db-sync-tshmb\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.428219 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-config-data\") pod \"keystone-db-sync-tshmb\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.432250 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-combined-ca-bundle\") pod \"keystone-db-sync-tshmb\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.444866 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kw9rp\" (UniqueName: \"kubernetes.io/projected/12e06aee-ac34-4d50-aa07-42f326d55fb2-kube-api-access-kw9rp\") pod \"keystone-db-sync-tshmb\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.451554 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.497919 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.524373 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpr7r\" (UniqueName: \"kubernetes.io/projected/a35d3d79-1e17-4750-9246-57363851ddd0-kube-api-access-fpr7r\") pod \"neutron-6888-account-create-sk6x7\" (UID: \"a35d3d79-1e17-4750-9246-57363851ddd0\") " pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.524428 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a35d3d79-1e17-4750-9246-57363851ddd0-operator-scripts\") pod \"neutron-6888-account-create-sk6x7\" (UID: \"a35d3d79-1e17-4750-9246-57363851ddd0\") " pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.525762 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a35d3d79-1e17-4750-9246-57363851ddd0-operator-scripts\") pod \"neutron-6888-account-create-sk6x7\" (UID: \"a35d3d79-1e17-4750-9246-57363851ddd0\") " pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.544415 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.548778 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpr7r\" (UniqueName: \"kubernetes.io/projected/a35d3d79-1e17-4750-9246-57363851ddd0-kube-api-access-fpr7r\") pod \"neutron-6888-account-create-sk6x7\" (UID: \"a35d3d79-1e17-4750-9246-57363851ddd0\") " pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.594859 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-fj9b9"] Nov 21 14:03:31 crc kubenswrapper[5133]: W1121 14:03:31.600665 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43b30a4a_3bf5_4a77_9422_01336a998d47.slice/crio-d17a20a2d9f6e425ffa3089ac2a636b4333deb71cacf6fd38659fdcd936a2fc0 WatchSource:0}: Error finding container d17a20a2d9f6e425ffa3089ac2a636b4333deb71cacf6fd38659fdcd936a2fc0: Status 404 returned error can't find the container with id d17a20a2d9f6e425ffa3089ac2a636b4333deb71cacf6fd38659fdcd936a2fc0 Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.616113 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.842061 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-ghlcg"] Nov 21 14:03:31 crc kubenswrapper[5133]: W1121 14:03:31.845234 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb48c94e4_c8f8_42fb_90b3_d300991ac5d6.slice/crio-e9433aff936b131ab0a1c2c9b76eac62f924916d8cddf1eb26253bb26a5c1586 WatchSource:0}: Error finding container e9433aff936b131ab0a1c2c9b76eac62f924916d8cddf1eb26253bb26a5c1586: Status 404 returned error can't find the container with id e9433aff936b131ab0a1c2c9b76eac62f924916d8cddf1eb26253bb26a5c1586 Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.848283 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f69d-account-create-6ct9x"] Nov 21 14:03:31 crc kubenswrapper[5133]: W1121 14:03:31.852581 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd32a6944_2567_4e79_9fee_d0aecb16f40e.slice/crio-c7fa2f91e43a26d4902961961d3f0058e4b0ca409fdcef7c7cedbfa3983fcf52 WatchSource:0}: Error finding container c7fa2f91e43a26d4902961961d3f0058e4b0ca409fdcef7c7cedbfa3983fcf52: Status 404 returned error can't find the container with id c7fa2f91e43a26d4902961961d3f0058e4b0ca409fdcef7c7cedbfa3983fcf52 Nov 21 14:03:31 crc kubenswrapper[5133]: I1121 14:03:31.853639 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-7vjhl"] Nov 21 14:03:31 crc kubenswrapper[5133]: W1121 14:03:31.857653 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab41e086_5fe6_40db_9293_7f8e01f43b08.slice/crio-2a64c30444497b846f06f596378f1e86ff315930f6ace7a2f346049400d02f92 WatchSource:0}: Error finding container 2a64c30444497b846f06f596378f1e86ff315930f6ace7a2f346049400d02f92: Status 404 returned error can't find the container with id 2a64c30444497b846f06f596378f1e86ff315930f6ace7a2f346049400d02f92 Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.171630 5133 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openstack/cinder-18e5-account-create-rmg6q"] Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.184489 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-tshmb"] Nov 21 14:03:32 crc kubenswrapper[5133]: W1121 14:03:32.198166 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d89170b_8c95_42fa_90bd_35edc597fc2f.slice/crio-99379c5030a5bab8610e39e4e19ea6a166ca5eb6f0d51a30d33a240f57b536e1 WatchSource:0}: Error finding container 99379c5030a5bab8610e39e4e19ea6a166ca5eb6f0d51a30d33a240f57b536e1: Status 404 returned error can't find the container with id 99379c5030a5bab8610e39e4e19ea6a166ca5eb6f0d51a30d33a240f57b536e1 Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.212578 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ghlcg" event={"ID":"b48c94e4-c8f8-42fb-90b3-d300991ac5d6","Type":"ContainerStarted","Data":"4245b8bbae7058ae2b18cfc715da4741937ddd97a0fe3033bccdcf669791b2b9"} Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.212640 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ghlcg" event={"ID":"b48c94e4-c8f8-42fb-90b3-d300991ac5d6","Type":"ContainerStarted","Data":"e9433aff936b131ab0a1c2c9b76eac62f924916d8cddf1eb26253bb26a5c1586"} Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.213799 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7vjhl" event={"ID":"d32a6944-2567-4e79-9fee-d0aecb16f40e","Type":"ContainerStarted","Data":"8f8f8d2a0f69e6b1740fc631a7e66fbc9797fca02d3a2526f4924a14ecba1735"} Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.213825 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7vjhl" event={"ID":"d32a6944-2567-4e79-9fee-d0aecb16f40e","Type":"ContainerStarted","Data":"c7fa2f91e43a26d4902961961d3f0058e4b0ca409fdcef7c7cedbfa3983fcf52"} Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.217945 5133 generic.go:334] "Generic (PLEG): container finished" podID="43b30a4a-3bf5-4a77-9422-01336a998d47" containerID="3e625539d166fa20ea2699dbd1573ac0028ad111713f065c62258f9e32a9801d" exitCode=0 Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.218508 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-fj9b9" event={"ID":"43b30a4a-3bf5-4a77-9422-01336a998d47","Type":"ContainerDied","Data":"3e625539d166fa20ea2699dbd1573ac0028ad111713f065c62258f9e32a9801d"} Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.218540 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-fj9b9" event={"ID":"43b30a4a-3bf5-4a77-9422-01336a998d47","Type":"ContainerStarted","Data":"d17a20a2d9f6e425ffa3089ac2a636b4333deb71cacf6fd38659fdcd936a2fc0"} Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.224283 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-18e5-account-create-rmg6q" event={"ID":"2d89170b-8c95-42fa-90bd-35edc597fc2f","Type":"ContainerStarted","Data":"99379c5030a5bab8610e39e4e19ea6a166ca5eb6f0d51a30d33a240f57b536e1"} Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.225672 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tshmb" event={"ID":"12e06aee-ac34-4d50-aa07-42f326d55fb2","Type":"ContainerStarted","Data":"437a799f919d00045cfeabbbf039c0506ec830cc78104dea688b4c7ec4f580bc"} Nov 21 14:03:32 crc 
kubenswrapper[5133]: I1121 14:03:32.226881 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f69d-account-create-6ct9x" event={"ID":"ab41e086-5fe6-40db-9293-7f8e01f43b08","Type":"ContainerStarted","Data":"82d4bdea738c4b67f9b4bc1a3b9e96a52ba47d5f691097cd4ff760c1a2c5a272"} Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.226916 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f69d-account-create-6ct9x" event={"ID":"ab41e086-5fe6-40db-9293-7f8e01f43b08","Type":"ContainerStarted","Data":"2a64c30444497b846f06f596378f1e86ff315930f6ace7a2f346049400d02f92"} Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.263804 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-f69d-account-create-6ct9x" podStartSLOduration=2.263775178 podStartE2EDuration="2.263775178s" podCreationTimestamp="2025-11-21 14:03:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:03:32.262315029 +0000 UTC m=+1272.060147277" watchObservedRunningTime="2025-11-21 14:03:32.263775178 +0000 UTC m=+1272.061607426" Nov 21 14:03:32 crc kubenswrapper[5133]: I1121 14:03:32.320868 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6888-account-create-sk6x7"] Nov 21 14:03:32 crc kubenswrapper[5133]: W1121 14:03:32.335602 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda35d3d79_1e17_4750_9246_57363851ddd0.slice/crio-52a0edca72e32926b626b0f424f03afd19fa008246d270965320ec6721d3d602 WatchSource:0}: Error finding container 52a0edca72e32926b626b0f424f03afd19fa008246d270965320ec6721d3d602: Status 404 returned error can't find the container with id 52a0edca72e32926b626b0f424f03afd19fa008246d270965320ec6721d3d602 Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.242732 5133 generic.go:334] "Generic (PLEG): container finished" podID="d32a6944-2567-4e79-9fee-d0aecb16f40e" containerID="8f8f8d2a0f69e6b1740fc631a7e66fbc9797fca02d3a2526f4924a14ecba1735" exitCode=0 Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.243155 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7vjhl" event={"ID":"d32a6944-2567-4e79-9fee-d0aecb16f40e","Type":"ContainerDied","Data":"8f8f8d2a0f69e6b1740fc631a7e66fbc9797fca02d3a2526f4924a14ecba1735"} Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.248590 5133 generic.go:334] "Generic (PLEG): container finished" podID="2d89170b-8c95-42fa-90bd-35edc597fc2f" containerID="2f1f90d7f092574e582f9159c77dedddc3e531fe122a1cfc85226f9feaae00c2" exitCode=0 Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.248682 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-18e5-account-create-rmg6q" event={"ID":"2d89170b-8c95-42fa-90bd-35edc597fc2f","Type":"ContainerDied","Data":"2f1f90d7f092574e582f9159c77dedddc3e531fe122a1cfc85226f9feaae00c2"} Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.250626 5133 generic.go:334] "Generic (PLEG): container finished" podID="a35d3d79-1e17-4750-9246-57363851ddd0" containerID="6689a9fbf1b6604c2250085a46f175acbea7ce5690f920e0efeed92fe57ae38b" exitCode=0 Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.250700 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6888-account-create-sk6x7" 
event={"ID":"a35d3d79-1e17-4750-9246-57363851ddd0","Type":"ContainerDied","Data":"6689a9fbf1b6604c2250085a46f175acbea7ce5690f920e0efeed92fe57ae38b"} Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.250730 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6888-account-create-sk6x7" event={"ID":"a35d3d79-1e17-4750-9246-57363851ddd0","Type":"ContainerStarted","Data":"52a0edca72e32926b626b0f424f03afd19fa008246d270965320ec6721d3d602"} Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.251930 5133 generic.go:334] "Generic (PLEG): container finished" podID="ab41e086-5fe6-40db-9293-7f8e01f43b08" containerID="82d4bdea738c4b67f9b4bc1a3b9e96a52ba47d5f691097cd4ff760c1a2c5a272" exitCode=0 Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.251972 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f69d-account-create-6ct9x" event={"ID":"ab41e086-5fe6-40db-9293-7f8e01f43b08","Type":"ContainerDied","Data":"82d4bdea738c4b67f9b4bc1a3b9e96a52ba47d5f691097cd4ff760c1a2c5a272"} Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.255328 5133 generic.go:334] "Generic (PLEG): container finished" podID="b48c94e4-c8f8-42fb-90b3-d300991ac5d6" containerID="4245b8bbae7058ae2b18cfc715da4741937ddd97a0fe3033bccdcf669791b2b9" exitCode=0 Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.255567 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ghlcg" event={"ID":"b48c94e4-c8f8-42fb-90b3-d300991ac5d6","Type":"ContainerDied","Data":"4245b8bbae7058ae2b18cfc715da4741937ddd97a0fe3033bccdcf669791b2b9"} Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.692329 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.779759 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b30a4a-3bf5-4a77-9422-01336a998d47-operator-scripts\") pod \"43b30a4a-3bf5-4a77-9422-01336a998d47\" (UID: \"43b30a4a-3bf5-4a77-9422-01336a998d47\") " Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.779923 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5d7r\" (UniqueName: \"kubernetes.io/projected/43b30a4a-3bf5-4a77-9422-01336a998d47-kube-api-access-q5d7r\") pod \"43b30a4a-3bf5-4a77-9422-01336a998d47\" (UID: \"43b30a4a-3bf5-4a77-9422-01336a998d47\") " Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.781041 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43b30a4a-3bf5-4a77-9422-01336a998d47-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "43b30a4a-3bf5-4a77-9422-01336a998d47" (UID: "43b30a4a-3bf5-4a77-9422-01336a998d47"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.795321 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43b30a4a-3bf5-4a77-9422-01336a998d47-kube-api-access-q5d7r" (OuterVolumeSpecName: "kube-api-access-q5d7r") pod "43b30a4a-3bf5-4a77-9422-01336a998d47" (UID: "43b30a4a-3bf5-4a77-9422-01336a998d47"). InnerVolumeSpecName "kube-api-access-q5d7r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.882662 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/43b30a4a-3bf5-4a77-9422-01336a998d47-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:33 crc kubenswrapper[5133]: I1121 14:03:33.882910 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5d7r\" (UniqueName: \"kubernetes.io/projected/43b30a4a-3bf5-4a77-9422-01336a998d47-kube-api-access-q5d7r\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.267636 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-fj9b9" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.267669 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-fj9b9" event={"ID":"43b30a4a-3bf5-4a77-9422-01336a998d47","Type":"ContainerDied","Data":"d17a20a2d9f6e425ffa3089ac2a636b4333deb71cacf6fd38659fdcd936a2fc0"} Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.267720 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d17a20a2d9f6e425ffa3089ac2a636b4333deb71cacf6fd38659fdcd936a2fc0" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.652911 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.806652 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab41e086-5fe6-40db-9293-7f8e01f43b08-operator-scripts\") pod \"ab41e086-5fe6-40db-9293-7f8e01f43b08\" (UID: \"ab41e086-5fe6-40db-9293-7f8e01f43b08\") " Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.807044 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xfd5\" (UniqueName: \"kubernetes.io/projected/ab41e086-5fe6-40db-9293-7f8e01f43b08-kube-api-access-5xfd5\") pod \"ab41e086-5fe6-40db-9293-7f8e01f43b08\" (UID: \"ab41e086-5fe6-40db-9293-7f8e01f43b08\") " Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.807218 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab41e086-5fe6-40db-9293-7f8e01f43b08-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ab41e086-5fe6-40db-9293-7f8e01f43b08" (UID: "ab41e086-5fe6-40db-9293-7f8e01f43b08"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.807444 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ab41e086-5fe6-40db-9293-7f8e01f43b08-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.812544 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab41e086-5fe6-40db-9293-7f8e01f43b08-kube-api-access-5xfd5" (OuterVolumeSpecName: "kube-api-access-5xfd5") pod "ab41e086-5fe6-40db-9293-7f8e01f43b08" (UID: "ab41e086-5fe6-40db-9293-7f8e01f43b08"). InnerVolumeSpecName "kube-api-access-5xfd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.857136 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.883326 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.901062 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.910814 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xfd5\" (UniqueName: \"kubernetes.io/projected/ab41e086-5fe6-40db-9293-7f8e01f43b08-kube-api-access-5xfd5\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:34 crc kubenswrapper[5133]: I1121 14:03:34.920251 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.012470 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2wn2\" (UniqueName: \"kubernetes.io/projected/2d89170b-8c95-42fa-90bd-35edc597fc2f-kube-api-access-k2wn2\") pod \"2d89170b-8c95-42fa-90bd-35edc597fc2f\" (UID: \"2d89170b-8c95-42fa-90bd-35edc597fc2f\") " Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.012630 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a35d3d79-1e17-4750-9246-57363851ddd0-operator-scripts\") pod \"a35d3d79-1e17-4750-9246-57363851ddd0\" (UID: \"a35d3d79-1e17-4750-9246-57363851ddd0\") " Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.013291 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d89170b-8c95-42fa-90bd-35edc597fc2f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2d89170b-8c95-42fa-90bd-35edc597fc2f" (UID: "2d89170b-8c95-42fa-90bd-35edc597fc2f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.013326 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a35d3d79-1e17-4750-9246-57363851ddd0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a35d3d79-1e17-4750-9246-57363851ddd0" (UID: "a35d3d79-1e17-4750-9246-57363851ddd0"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.013377 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d89170b-8c95-42fa-90bd-35edc597fc2f-operator-scripts\") pod \"2d89170b-8c95-42fa-90bd-35edc597fc2f\" (UID: \"2d89170b-8c95-42fa-90bd-35edc597fc2f\") " Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.013476 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrt6d\" (UniqueName: \"kubernetes.io/projected/d32a6944-2567-4e79-9fee-d0aecb16f40e-kube-api-access-vrt6d\") pod \"d32a6944-2567-4e79-9fee-d0aecb16f40e\" (UID: \"d32a6944-2567-4e79-9fee-d0aecb16f40e\") " Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.013880 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpr7r\" (UniqueName: \"kubernetes.io/projected/a35d3d79-1e17-4750-9246-57363851ddd0-kube-api-access-fpr7r\") pod \"a35d3d79-1e17-4750-9246-57363851ddd0\" (UID: \"a35d3d79-1e17-4750-9246-57363851ddd0\") " Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.013966 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47sr2\" (UniqueName: \"kubernetes.io/projected/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-kube-api-access-47sr2\") pod \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\" (UID: \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\") " Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.014105 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d32a6944-2567-4e79-9fee-d0aecb16f40e-operator-scripts\") pod \"d32a6944-2567-4e79-9fee-d0aecb16f40e\" (UID: \"d32a6944-2567-4e79-9fee-d0aecb16f40e\") " Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.014143 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-operator-scripts\") pod \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\" (UID: \"b48c94e4-c8f8-42fb-90b3-d300991ac5d6\") " Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.014627 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d32a6944-2567-4e79-9fee-d0aecb16f40e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d32a6944-2567-4e79-9fee-d0aecb16f40e" (UID: "d32a6944-2567-4e79-9fee-d0aecb16f40e"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.014934 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d32a6944-2567-4e79-9fee-d0aecb16f40e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.014951 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a35d3d79-1e17-4750-9246-57363851ddd0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.014960 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d89170b-8c95-42fa-90bd-35edc597fc2f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.015036 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b48c94e4-c8f8-42fb-90b3-d300991ac5d6" (UID: "b48c94e4-c8f8-42fb-90b3-d300991ac5d6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.017643 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a35d3d79-1e17-4750-9246-57363851ddd0-kube-api-access-fpr7r" (OuterVolumeSpecName: "kube-api-access-fpr7r") pod "a35d3d79-1e17-4750-9246-57363851ddd0" (UID: "a35d3d79-1e17-4750-9246-57363851ddd0"). InnerVolumeSpecName "kube-api-access-fpr7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.017950 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d89170b-8c95-42fa-90bd-35edc597fc2f-kube-api-access-k2wn2" (OuterVolumeSpecName: "kube-api-access-k2wn2") pod "2d89170b-8c95-42fa-90bd-35edc597fc2f" (UID: "2d89170b-8c95-42fa-90bd-35edc597fc2f"). InnerVolumeSpecName "kube-api-access-k2wn2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.018014 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d32a6944-2567-4e79-9fee-d0aecb16f40e-kube-api-access-vrt6d" (OuterVolumeSpecName: "kube-api-access-vrt6d") pod "d32a6944-2567-4e79-9fee-d0aecb16f40e" (UID: "d32a6944-2567-4e79-9fee-d0aecb16f40e"). InnerVolumeSpecName "kube-api-access-vrt6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.018260 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-kube-api-access-47sr2" (OuterVolumeSpecName: "kube-api-access-47sr2") pod "b48c94e4-c8f8-42fb-90b3-d300991ac5d6" (UID: "b48c94e4-c8f8-42fb-90b3-d300991ac5d6"). InnerVolumeSpecName "kube-api-access-47sr2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.116879 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.116936 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2wn2\" (UniqueName: \"kubernetes.io/projected/2d89170b-8c95-42fa-90bd-35edc597fc2f-kube-api-access-k2wn2\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.116956 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrt6d\" (UniqueName: \"kubernetes.io/projected/d32a6944-2567-4e79-9fee-d0aecb16f40e-kube-api-access-vrt6d\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.116969 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpr7r\" (UniqueName: \"kubernetes.io/projected/a35d3d79-1e17-4750-9246-57363851ddd0-kube-api-access-fpr7r\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.116982 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47sr2\" (UniqueName: \"kubernetes.io/projected/b48c94e4-c8f8-42fb-90b3-d300991ac5d6-kube-api-access-47sr2\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.288599 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-ghlcg" event={"ID":"b48c94e4-c8f8-42fb-90b3-d300991ac5d6","Type":"ContainerDied","Data":"e9433aff936b131ab0a1c2c9b76eac62f924916d8cddf1eb26253bb26a5c1586"} Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.288656 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9433aff936b131ab0a1c2c9b76eac62f924916d8cddf1eb26253bb26a5c1586" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.288737 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-ghlcg" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.296830 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7vjhl" event={"ID":"d32a6944-2567-4e79-9fee-d0aecb16f40e","Type":"ContainerDied","Data":"c7fa2f91e43a26d4902961961d3f0058e4b0ca409fdcef7c7cedbfa3983fcf52"} Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.296863 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7fa2f91e43a26d4902961961d3f0058e4b0ca409fdcef7c7cedbfa3983fcf52" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.296919 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7vjhl" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.298429 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-18e5-account-create-rmg6q" event={"ID":"2d89170b-8c95-42fa-90bd-35edc597fc2f","Type":"ContainerDied","Data":"99379c5030a5bab8610e39e4e19ea6a166ca5eb6f0d51a30d33a240f57b536e1"} Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.298452 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99379c5030a5bab8610e39e4e19ea6a166ca5eb6f0d51a30d33a240f57b536e1" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.298518 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-18e5-account-create-rmg6q" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.300676 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6888-account-create-sk6x7" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.301158 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6888-account-create-sk6x7" event={"ID":"a35d3d79-1e17-4750-9246-57363851ddd0","Type":"ContainerDied","Data":"52a0edca72e32926b626b0f424f03afd19fa008246d270965320ec6721d3d602"} Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.301209 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52a0edca72e32926b626b0f424f03afd19fa008246d270965320ec6721d3d602" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.303875 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f69d-account-create-6ct9x" event={"ID":"ab41e086-5fe6-40db-9293-7f8e01f43b08","Type":"ContainerDied","Data":"2a64c30444497b846f06f596378f1e86ff315930f6ace7a2f346049400d02f92"} Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.303900 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a64c30444497b846f06f596378f1e86ff315930f6ace7a2f346049400d02f92" Nov 21 14:03:35 crc kubenswrapper[5133]: I1121 14:03:35.303946 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f69d-account-create-6ct9x" Nov 21 14:03:38 crc kubenswrapper[5133]: I1121 14:03:38.344065 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tshmb" event={"ID":"12e06aee-ac34-4d50-aa07-42f326d55fb2","Type":"ContainerStarted","Data":"da90ab00cb608f0d016ddf04a222142730d338b65882482d85d8fe3dadbbdff9"} Nov 21 14:03:38 crc kubenswrapper[5133]: I1121 14:03:38.376025 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-tshmb" podStartSLOduration=1.866414215 podStartE2EDuration="7.375967991s" podCreationTimestamp="2025-11-21 14:03:31 +0000 UTC" firstStartedPulling="2025-11-21 14:03:32.205224024 +0000 UTC m=+1272.003056272" lastFinishedPulling="2025-11-21 14:03:37.7147778 +0000 UTC m=+1277.512610048" observedRunningTime="2025-11-21 14:03:38.366417345 +0000 UTC m=+1278.164249613" watchObservedRunningTime="2025-11-21 14:03:38.375967991 +0000 UTC m=+1278.173800249" Nov 21 14:03:41 crc kubenswrapper[5133]: I1121 14:03:41.376868 5133 generic.go:334] "Generic (PLEG): container finished" podID="12e06aee-ac34-4d50-aa07-42f326d55fb2" containerID="da90ab00cb608f0d016ddf04a222142730d338b65882482d85d8fe3dadbbdff9" exitCode=0 Nov 21 14:03:41 crc kubenswrapper[5133]: I1121 14:03:41.377048 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tshmb" event={"ID":"12e06aee-ac34-4d50-aa07-42f326d55fb2","Type":"ContainerDied","Data":"da90ab00cb608f0d016ddf04a222142730d338b65882482d85d8fe3dadbbdff9"} Nov 21 14:03:42 crc kubenswrapper[5133]: I1121 14:03:42.734234 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:42 crc kubenswrapper[5133]: I1121 14:03:42.939798 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-config-data\") pod \"12e06aee-ac34-4d50-aa07-42f326d55fb2\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " Nov 21 14:03:42 crc kubenswrapper[5133]: I1121 14:03:42.940063 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kw9rp\" (UniqueName: \"kubernetes.io/projected/12e06aee-ac34-4d50-aa07-42f326d55fb2-kube-api-access-kw9rp\") pod \"12e06aee-ac34-4d50-aa07-42f326d55fb2\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " Nov 21 14:03:42 crc kubenswrapper[5133]: I1121 14:03:42.940296 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-combined-ca-bundle\") pod \"12e06aee-ac34-4d50-aa07-42f326d55fb2\" (UID: \"12e06aee-ac34-4d50-aa07-42f326d55fb2\") " Nov 21 14:03:42 crc kubenswrapper[5133]: I1121 14:03:42.953936 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12e06aee-ac34-4d50-aa07-42f326d55fb2-kube-api-access-kw9rp" (OuterVolumeSpecName: "kube-api-access-kw9rp") pod "12e06aee-ac34-4d50-aa07-42f326d55fb2" (UID: "12e06aee-ac34-4d50-aa07-42f326d55fb2"). InnerVolumeSpecName "kube-api-access-kw9rp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:42 crc kubenswrapper[5133]: I1121 14:03:42.993344 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "12e06aee-ac34-4d50-aa07-42f326d55fb2" (UID: "12e06aee-ac34-4d50-aa07-42f326d55fb2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.030518 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-config-data" (OuterVolumeSpecName: "config-data") pod "12e06aee-ac34-4d50-aa07-42f326d55fb2" (UID: "12e06aee-ac34-4d50-aa07-42f326d55fb2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.043365 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kw9rp\" (UniqueName: \"kubernetes.io/projected/12e06aee-ac34-4d50-aa07-42f326d55fb2-kube-api-access-kw9rp\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.043408 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.043418 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/12e06aee-ac34-4d50-aa07-42f326d55fb2-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.424309 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6blxq" event={"ID":"76b580d8-fd56-40c0-a24a-1a3234d95ca6","Type":"ContainerStarted","Data":"af35a52559600ac679d265671a6b2484259a5617266456e79ef435f6c440aef9"} Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.435386 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-tshmb" event={"ID":"12e06aee-ac34-4d50-aa07-42f326d55fb2","Type":"ContainerDied","Data":"437a799f919d00045cfeabbbf039c0506ec830cc78104dea688b4c7ec4f580bc"} Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.435463 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="437a799f919d00045cfeabbbf039c0506ec830cc78104dea688b4c7ec4f580bc" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.435491 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-tshmb" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.472507 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-6blxq" podStartSLOduration=2.622409305 podStartE2EDuration="34.472469044s" podCreationTimestamp="2025-11-21 14:03:09 +0000 UTC" firstStartedPulling="2025-11-21 14:03:10.030296197 +0000 UTC m=+1249.828128445" lastFinishedPulling="2025-11-21 14:03:41.880355916 +0000 UTC m=+1281.678188184" observedRunningTime="2025-11-21 14:03:43.464348477 +0000 UTC m=+1283.262180805" watchObservedRunningTime="2025-11-21 14:03:43.472469044 +0000 UTC m=+1283.270301312" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.725639 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66fbd85b65-m9ffq"] Nov 21 14:03:43 crc kubenswrapper[5133]: E1121 14:03:43.726494 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d89170b-8c95-42fa-90bd-35edc597fc2f" containerName="mariadb-account-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726514 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d89170b-8c95-42fa-90bd-35edc597fc2f" containerName="mariadb-account-create" Nov 21 14:03:43 crc kubenswrapper[5133]: E1121 14:03:43.726531 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab41e086-5fe6-40db-9293-7f8e01f43b08" containerName="mariadb-account-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726538 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab41e086-5fe6-40db-9293-7f8e01f43b08" containerName="mariadb-account-create" Nov 21 14:03:43 crc kubenswrapper[5133]: E1121 14:03:43.726546 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a35d3d79-1e17-4750-9246-57363851ddd0" containerName="mariadb-account-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726553 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a35d3d79-1e17-4750-9246-57363851ddd0" containerName="mariadb-account-create" Nov 21 14:03:43 crc kubenswrapper[5133]: E1121 14:03:43.726568 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12e06aee-ac34-4d50-aa07-42f326d55fb2" containerName="keystone-db-sync" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726574 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="12e06aee-ac34-4d50-aa07-42f326d55fb2" containerName="keystone-db-sync" Nov 21 14:03:43 crc kubenswrapper[5133]: E1121 14:03:43.726589 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43b30a4a-3bf5-4a77-9422-01336a998d47" containerName="mariadb-database-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726596 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="43b30a4a-3bf5-4a77-9422-01336a998d47" containerName="mariadb-database-create" Nov 21 14:03:43 crc kubenswrapper[5133]: E1121 14:03:43.726606 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d32a6944-2567-4e79-9fee-d0aecb16f40e" containerName="mariadb-database-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726612 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d32a6944-2567-4e79-9fee-d0aecb16f40e" containerName="mariadb-database-create" Nov 21 14:03:43 crc kubenswrapper[5133]: E1121 14:03:43.726622 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b48c94e4-c8f8-42fb-90b3-d300991ac5d6" containerName="mariadb-database-create" Nov 21 14:03:43 crc 
kubenswrapper[5133]: I1121 14:03:43.726629 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b48c94e4-c8f8-42fb-90b3-d300991ac5d6" containerName="mariadb-database-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726789 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b48c94e4-c8f8-42fb-90b3-d300991ac5d6" containerName="mariadb-database-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726832 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d89170b-8c95-42fa-90bd-35edc597fc2f" containerName="mariadb-account-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726842 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab41e086-5fe6-40db-9293-7f8e01f43b08" containerName="mariadb-account-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726868 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="43b30a4a-3bf5-4a77-9422-01336a998d47" containerName="mariadb-database-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726881 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a35d3d79-1e17-4750-9246-57363851ddd0" containerName="mariadb-account-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726895 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="d32a6944-2567-4e79-9fee-d0aecb16f40e" containerName="mariadb-database-create" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.726910 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="12e06aee-ac34-4d50-aa07-42f326d55fb2" containerName="keystone-db-sync" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.738314 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.780053 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66fbd85b65-m9ffq"] Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.846098 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-4ztk6"] Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.848074 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.854487 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.854739 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.854888 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bv5lp" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.854954 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.855057 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.864206 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-config\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.864284 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-nb\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.864321 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-dns-svc\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.864356 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-sb\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.864410 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t6wb6\" (UniqueName: \"kubernetes.io/projected/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-kube-api-access-t6wb6\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.878522 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-4ztk6"] Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.966760 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lhrm\" (UniqueName: \"kubernetes.io/projected/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-kube-api-access-2lhrm\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.966866 5133 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-config-data\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.966896 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-scripts\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.966933 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t6wb6\" (UniqueName: \"kubernetes.io/projected/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-kube-api-access-t6wb6\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.967044 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-credential-keys\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.967079 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-config\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.967114 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-fernet-keys\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.967191 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-nb\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.967240 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-combined-ca-bundle\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.967282 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-dns-svc\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.967338 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-sb\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.973182 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-dns-svc\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.973875 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-sb\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.974517 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-config\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:43 crc kubenswrapper[5133]: I1121 14:03:43.982420 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-nb\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.025880 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t6wb6\" (UniqueName: \"kubernetes.io/projected/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-kube-api-access-t6wb6\") pod \"dnsmasq-dns-66fbd85b65-m9ffq\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.078449 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lhrm\" (UniqueName: \"kubernetes.io/projected/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-kube-api-access-2lhrm\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.078834 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-config-data\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.079551 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-scripts\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.079747 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-credential-keys\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.083585 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-fernet-keys\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.083752 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-combined-ca-bundle\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.087369 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-8cz2f"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.096446 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-scripts\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.097844 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.106595 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.109798 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-config-data\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.145250 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-hf74x" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.145603 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.145634 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.145635 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-fernet-keys\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.147678 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-credential-keys\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.149512 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/cinder-db-sync-8cz2f"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.162958 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lhrm\" (UniqueName: \"kubernetes.io/projected/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-kube-api-access-2lhrm\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.172515 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-combined-ca-bundle\") pod \"keystone-bootstrap-4ztk6\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.182567 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.189453 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-m47ck"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.190850 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.199561 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b61488dd-2db1-43b5-996b-43b76a5dbda6-etc-machine-id\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.199657 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-config-data\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.199698 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-scripts\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.199728 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-db-sync-config-data\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.199758 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-combined-ca-bundle\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.199881 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdp96\" (UniqueName: \"kubernetes.io/projected/b61488dd-2db1-43b5-996b-43b76a5dbda6-kube-api-access-hdp96\") 
pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.200044 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-m47ck"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.201264 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.203623 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-58hz5" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.215299 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-26kdp"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.216783 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.220689 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.220959 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.221118 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-5s6lj" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.222275 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-26kdp"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303684 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdp96\" (UniqueName: \"kubernetes.io/projected/b61488dd-2db1-43b5-996b-43b76a5dbda6-kube-api-access-hdp96\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303728 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-config\") pod \"neutron-db-sync-26kdp\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303764 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nqc5\" (UniqueName: \"kubernetes.io/projected/944fb1ae-3bf4-479a-863d-62867fdf5b82-kube-api-access-8nqc5\") pod \"neutron-db-sync-26kdp\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303786 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t567\" (UniqueName: \"kubernetes.io/projected/e5102115-b63e-42e2-8aae-1a68e7dda37c-kube-api-access-7t567\") pod \"barbican-db-sync-m47ck\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303810 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b61488dd-2db1-43b5-996b-43b76a5dbda6-etc-machine-id\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " 
pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303835 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-combined-ca-bundle\") pod \"barbican-db-sync-m47ck\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303875 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-config-data\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303896 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-db-sync-config-data\") pod \"barbican-db-sync-m47ck\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303914 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-scripts\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303948 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-db-sync-config-data\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.303971 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-combined-ca-bundle\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.304047 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-combined-ca-bundle\") pod \"neutron-db-sync-26kdp\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.304520 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b61488dd-2db1-43b5-996b-43b76a5dbda6-etc-machine-id\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.316895 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66fbd85b65-m9ffq"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.317647 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-scripts\") pod \"cinder-db-sync-8cz2f\" (UID: 
\"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.323484 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-db-sync-config-data\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.324064 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-combined-ca-bundle\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.326351 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-config-data\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.329820 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdp96\" (UniqueName: \"kubernetes.io/projected/b61488dd-2db1-43b5-996b-43b76a5dbda6-kube-api-access-hdp96\") pod \"cinder-db-sync-8cz2f\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.362074 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bf59f66bf-7hgt9"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.366353 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.377868 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-tvs68"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.379211 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.383413 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-8jkmh" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.386347 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.388301 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.388543 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.389381 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.394827 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bf59f66bf-7hgt9"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.395196 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.395474 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.401128 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-tvs68"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.408339 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.419137 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-db-sync-config-data\") pod \"barbican-db-sync-m47ck\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.419292 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-sb\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.419500 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-combined-ca-bundle\") pod \"neutron-db-sync-26kdp\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.426681 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-db-sync-config-data\") pod \"barbican-db-sync-m47ck\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.428259 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-config\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.428331 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-dns-svc\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.428427 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-config\") pod \"neutron-db-sync-26kdp\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " pod="openstack/neutron-db-sync-26kdp" Nov 21 
14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.428498 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nqc5\" (UniqueName: \"kubernetes.io/projected/944fb1ae-3bf4-479a-863d-62867fdf5b82-kube-api-access-8nqc5\") pod \"neutron-db-sync-26kdp\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.428532 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t567\" (UniqueName: \"kubernetes.io/projected/e5102115-b63e-42e2-8aae-1a68e7dda37c-kube-api-access-7t567\") pod \"barbican-db-sync-m47ck\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.428553 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzn2b\" (UniqueName: \"kubernetes.io/projected/92dd2b99-d1ce-4bdf-a002-94828c44bb40-kube-api-access-qzn2b\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.428632 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-combined-ca-bundle\") pod \"barbican-db-sync-m47ck\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.428672 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-nb\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.437814 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-combined-ca-bundle\") pod \"neutron-db-sync-26kdp\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.438843 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-combined-ca-bundle\") pod \"barbican-db-sync-m47ck\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.442232 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-config\") pod \"neutron-db-sync-26kdp\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.462341 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nqc5\" (UniqueName: \"kubernetes.io/projected/944fb1ae-3bf4-479a-863d-62867fdf5b82-kube-api-access-8nqc5\") pod \"neutron-db-sync-26kdp\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.490178 5133 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t567\" (UniqueName: \"kubernetes.io/projected/e5102115-b63e-42e2-8aae-1a68e7dda37c-kube-api-access-7t567\") pod \"barbican-db-sync-m47ck\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531461 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-sb\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531534 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-log-httpd\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531555 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5trxn\" (UniqueName: \"kubernetes.io/projected/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-kube-api-access-5trxn\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531606 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531648 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-combined-ca-bundle\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531675 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mqdd\" (UniqueName: \"kubernetes.io/projected/5782c62d-fdcd-43f5-9af1-c84968e501ed-kube-api-access-2mqdd\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531697 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-scripts\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531715 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-config\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531730 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-dns-svc\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531768 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-config-data\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531790 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5782c62d-fdcd-43f5-9af1-c84968e501ed-logs\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531820 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzn2b\" (UniqueName: \"kubernetes.io/projected/92dd2b99-d1ce-4bdf-a002-94828c44bb40-kube-api-access-qzn2b\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531842 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531869 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-config-data\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531892 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-run-httpd\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531912 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-nb\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.531952 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-scripts\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.532967 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-sb\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: 
\"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.537263 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-dns-svc\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.537471 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-nb\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.537945 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-config\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.559929 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.567836 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzn2b\" (UniqueName: \"kubernetes.io/projected/92dd2b99-d1ce-4bdf-a002-94828c44bb40-kube-api-access-qzn2b\") pod \"dnsmasq-dns-6bf59f66bf-7hgt9\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.633387 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-config-data\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.633901 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5782c62d-fdcd-43f5-9af1-c84968e501ed-logs\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.633943 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.633974 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-config-data\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.634018 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-run-httpd\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " 
pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.634064 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-scripts\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.634098 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-log-httpd\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.634118 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5trxn\" (UniqueName: \"kubernetes.io/projected/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-kube-api-access-5trxn\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.634151 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.634203 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-combined-ca-bundle\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.634225 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mqdd\" (UniqueName: \"kubernetes.io/projected/5782c62d-fdcd-43f5-9af1-c84968e501ed-kube-api-access-2mqdd\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.634244 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-scripts\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.636239 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-log-httpd\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.640845 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-scripts\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.641328 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-run-httpd\") pod \"ceilometer-0\" (UID: 
\"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.642278 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-config-data\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.642645 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5782c62d-fdcd-43f5-9af1-c84968e501ed-logs\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.642969 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.643543 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-m47ck" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.644047 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-scripts\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.655924 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-combined-ca-bundle\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.658720 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-config-data\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.659558 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.662635 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-26kdp" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.669747 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mqdd\" (UniqueName: \"kubernetes.io/projected/5782c62d-fdcd-43f5-9af1-c84968e501ed-kube-api-access-2mqdd\") pod \"placement-db-sync-tvs68\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.685214 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5trxn\" (UniqueName: \"kubernetes.io/projected/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-kube-api-access-5trxn\") pod \"ceilometer-0\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.697381 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.720928 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-tvs68" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.777808 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.915166 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-4ztk6"] Nov 21 14:03:44 crc kubenswrapper[5133]: I1121 14:03:44.927595 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66fbd85b65-m9ffq"] Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.207340 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-8cz2f"] Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.426330 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-26kdp"] Nov 21 14:03:45 crc kubenswrapper[5133]: W1121 14:03:45.475446 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod944fb1ae_3bf4_479a_863d_62867fdf5b82.slice/crio-d8790ab86076f38b1697628d6ce947bf594edbb2832a9fb0045bd7dad402868d WatchSource:0}: Error finding container d8790ab86076f38b1697628d6ce947bf594edbb2832a9fb0045bd7dad402868d: Status 404 returned error can't find the container with id d8790ab86076f38b1697628d6ce947bf594edbb2832a9fb0045bd7dad402868d Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.477537 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8cz2f" event={"ID":"b61488dd-2db1-43b5-996b-43b76a5dbda6","Type":"ContainerStarted","Data":"c7a4e2a3250f5d9d66bbbbb2056fc3cd77c91041bad9ea407b0d0847b412edc6"} Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.480765 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-4ztk6" event={"ID":"38d44e0f-f488-4cc9-bfa2-6eb2830cc241","Type":"ContainerStarted","Data":"32d4bf4981adfaeaf3086c173849e5a983169c9fd6b11609978a9f53c1f664d9"} Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.480807 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-4ztk6" event={"ID":"38d44e0f-f488-4cc9-bfa2-6eb2830cc241","Type":"ContainerStarted","Data":"e4c51c83b1797515bd50aeddc2192a95966d1b8a44f6214604a6cb8b839c688a"} Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.482508 5133 generic.go:334] "Generic (PLEG): 
container finished" podID="ccddde96-ed5a-4ebe-8f9d-9cb64115f434" containerID="301b8672e3edd29f1c695ded3c6b24f71412e033af66af4e18a180c049a2a64e" exitCode=0 Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.482534 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" event={"ID":"ccddde96-ed5a-4ebe-8f9d-9cb64115f434","Type":"ContainerDied","Data":"301b8672e3edd29f1c695ded3c6b24f71412e033af66af4e18a180c049a2a64e"} Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.482551 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" event={"ID":"ccddde96-ed5a-4ebe-8f9d-9cb64115f434","Type":"ContainerStarted","Data":"c4b3cfd4815806b1edb97f38eb5b481523e40ee5d55a94e7b798678e6790398d"} Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.574015 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-m47ck"] Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.598348 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bf59f66bf-7hgt9"] Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.697700 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-tvs68"] Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.713487 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.888370 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.973252 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-nb\") pod \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.973720 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-sb\") pod \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.973762 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t6wb6\" (UniqueName: \"kubernetes.io/projected/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-kube-api-access-t6wb6\") pod \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.973883 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-dns-svc\") pod \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.973920 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-config\") pod \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\" (UID: \"ccddde96-ed5a-4ebe-8f9d-9cb64115f434\") " Nov 21 14:03:45 crc kubenswrapper[5133]: I1121 14:03:45.990199 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-kube-api-access-t6wb6" (OuterVolumeSpecName: "kube-api-access-t6wb6") pod "ccddde96-ed5a-4ebe-8f9d-9cb64115f434" (UID: "ccddde96-ed5a-4ebe-8f9d-9cb64115f434"). InnerVolumeSpecName "kube-api-access-t6wb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.000084 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-config" (OuterVolumeSpecName: "config") pod "ccddde96-ed5a-4ebe-8f9d-9cb64115f434" (UID: "ccddde96-ed5a-4ebe-8f9d-9cb64115f434"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.004719 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ccddde96-ed5a-4ebe-8f9d-9cb64115f434" (UID: "ccddde96-ed5a-4ebe-8f9d-9cb64115f434"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.071437 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ccddde96-ed5a-4ebe-8f9d-9cb64115f434" (UID: "ccddde96-ed5a-4ebe-8f9d-9cb64115f434"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.071485 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ccddde96-ed5a-4ebe-8f9d-9cb64115f434" (UID: "ccddde96-ed5a-4ebe-8f9d-9cb64115f434"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.077133 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.077183 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.077199 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t6wb6\" (UniqueName: \"kubernetes.io/projected/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-kube-api-access-t6wb6\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.077215 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.077227 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccddde96-ed5a-4ebe-8f9d-9cb64115f434-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.239344 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.508689 5133 generic.go:334] "Generic (PLEG): container finished" podID="92dd2b99-d1ce-4bdf-a002-94828c44bb40" containerID="aef9c7821dcd9e335b937f09586778fdd2db25d1a69ba8a1cd62a5b313a55c27" exitCode=0 Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.508827 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" event={"ID":"92dd2b99-d1ce-4bdf-a002-94828c44bb40","Type":"ContainerDied","Data":"aef9c7821dcd9e335b937f09586778fdd2db25d1a69ba8a1cd62a5b313a55c27"} Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.508878 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" event={"ID":"92dd2b99-d1ce-4bdf-a002-94828c44bb40","Type":"ContainerStarted","Data":"7edfe4e12bdc90b8a43bf14f784c63dac70f98603967d7e3ad689dd2137fa74f"} Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.519774 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-26kdp" event={"ID":"944fb1ae-3bf4-479a-863d-62867fdf5b82","Type":"ContainerStarted","Data":"1b012093ae77969b0a3c7baae1bd9fe3b1c9a037d5139292c62a7d2f76c2c18e"} Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.519844 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-26kdp" event={"ID":"944fb1ae-3bf4-479a-863d-62867fdf5b82","Type":"ContainerStarted","Data":"d8790ab86076f38b1697628d6ce947bf594edbb2832a9fb0045bd7dad402868d"} Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.527484 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerStarted","Data":"a802e6ea8163769d6f10ff6785d00458d9a768278dcf424c50bf80f589ff1b2d"} Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.555596 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" 
event={"ID":"ccddde96-ed5a-4ebe-8f9d-9cb64115f434","Type":"ContainerDied","Data":"c4b3cfd4815806b1edb97f38eb5b481523e40ee5d55a94e7b798678e6790398d"} Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.556095 5133 scope.go:117] "RemoveContainer" containerID="301b8672e3edd29f1c695ded3c6b24f71412e033af66af4e18a180c049a2a64e" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.556029 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66fbd85b65-m9ffq" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.561477 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-tvs68" event={"ID":"5782c62d-fdcd-43f5-9af1-c84968e501ed","Type":"ContainerStarted","Data":"7ba367f29902ff91c41d5f9dc94241887aed309de045205f754d203e86089e58"} Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.577609 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-26kdp" podStartSLOduration=2.5775748849999998 podStartE2EDuration="2.577574885s" podCreationTimestamp="2025-11-21 14:03:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:03:46.565300077 +0000 UTC m=+1286.363132325" watchObservedRunningTime="2025-11-21 14:03:46.577574885 +0000 UTC m=+1286.375407133" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.582075 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-m47ck" event={"ID":"e5102115-b63e-42e2-8aae-1a68e7dda37c","Type":"ContainerStarted","Data":"1379406d2cc782d96ee5146cb934579b91a90bdb915702689845f9db9234ce8c"} Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.616507 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-4ztk6" podStartSLOduration=3.616470703 podStartE2EDuration="3.616470703s" podCreationTimestamp="2025-11-21 14:03:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:03:46.602731866 +0000 UTC m=+1286.400564104" watchObservedRunningTime="2025-11-21 14:03:46.616470703 +0000 UTC m=+1286.414302951" Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.796537 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66fbd85b65-m9ffq"] Nov 21 14:03:46 crc kubenswrapper[5133]: I1121 14:03:46.803007 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66fbd85b65-m9ffq"] Nov 21 14:03:47 crc kubenswrapper[5133]: I1121 14:03:47.602499 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" event={"ID":"92dd2b99-d1ce-4bdf-a002-94828c44bb40","Type":"ContainerStarted","Data":"4a5564c86af0c6c595c719fb4772e5581e5b38cf3a2321ee5521d1dbeee04cca"} Nov 21 14:03:47 crc kubenswrapper[5133]: I1121 14:03:47.603423 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:47 crc kubenswrapper[5133]: I1121 14:03:47.637448 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" podStartSLOduration=3.6374174740000003 podStartE2EDuration="3.637417474s" podCreationTimestamp="2025-11-21 14:03:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 
14:03:47.626633346 +0000 UTC m=+1287.424465614" watchObservedRunningTime="2025-11-21 14:03:47.637417474 +0000 UTC m=+1287.435249722" Nov 21 14:03:48 crc kubenswrapper[5133]: I1121 14:03:48.481655 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccddde96-ed5a-4ebe-8f9d-9cb64115f434" path="/var/lib/kubelet/pods/ccddde96-ed5a-4ebe-8f9d-9cb64115f434/volumes" Nov 21 14:03:54 crc kubenswrapper[5133]: I1121 14:03:54.705982 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:03:54 crc kubenswrapper[5133]: I1121 14:03:54.794730 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-bdzkx"] Nov 21 14:03:54 crc kubenswrapper[5133]: I1121 14:03:54.795147 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-bdzkx" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerName="dnsmasq-dns" containerID="cri-o://d1cf7b7258cb63743b20f8c49032ed6e0de6e4fe067e736963cddf65b034e063" gracePeriod=10 Nov 21 14:03:55 crc kubenswrapper[5133]: I1121 14:03:55.717705 5133 generic.go:334] "Generic (PLEG): container finished" podID="38d44e0f-f488-4cc9-bfa2-6eb2830cc241" containerID="32d4bf4981adfaeaf3086c173849e5a983169c9fd6b11609978a9f53c1f664d9" exitCode=0 Nov 21 14:03:55 crc kubenswrapper[5133]: I1121 14:03:55.717812 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-4ztk6" event={"ID":"38d44e0f-f488-4cc9-bfa2-6eb2830cc241","Type":"ContainerDied","Data":"32d4bf4981adfaeaf3086c173849e5a983169c9fd6b11609978a9f53c1f664d9"} Nov 21 14:03:55 crc kubenswrapper[5133]: I1121 14:03:55.730389 5133 generic.go:334] "Generic (PLEG): container finished" podID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerID="d1cf7b7258cb63743b20f8c49032ed6e0de6e4fe067e736963cddf65b034e063" exitCode=0 Nov 21 14:03:55 crc kubenswrapper[5133]: I1121 14:03:55.730443 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-bdzkx" event={"ID":"c34f5cc7-8dfa-48be-8392-0c4f8bb86208","Type":"ContainerDied","Data":"d1cf7b7258cb63743b20f8c49032ed6e0de6e4fe067e736963cddf65b034e063"} Nov 21 14:03:56 crc kubenswrapper[5133]: I1121 14:03:56.362630 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-bdzkx" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: connect: connection refused" Nov 21 14:04:01 crc kubenswrapper[5133]: I1121 14:04:01.364638 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-bdzkx" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: connect: connection refused" Nov 21 14:04:06 crc kubenswrapper[5133]: I1121 14:04:06.361573 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-bdzkx" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: connect: connection refused" Nov 21 14:04:06 crc kubenswrapper[5133]: I1121 14:04:06.362345 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:04:07 crc kubenswrapper[5133]: E1121 14:04:07.646267 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context 
canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 21 14:04:07 crc kubenswrapper[5133]: E1121 14:04:07.646982 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7t567,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-m47ck_openstack(e5102115-b63e-42e2-8aae-1a68e7dda37c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:04:07 crc kubenswrapper[5133]: E1121 14:04:07.648591 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-m47ck" podUID="e5102115-b63e-42e2-8aae-1a68e7dda37c" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.747635 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.838789 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-credential-keys\") pod \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.839057 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-config-data\") pod \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.839142 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-combined-ca-bundle\") pod \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.839268 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-fernet-keys\") pod \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.839312 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lhrm\" (UniqueName: \"kubernetes.io/projected/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-kube-api-access-2lhrm\") pod \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.839443 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-scripts\") pod \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\" (UID: \"38d44e0f-f488-4cc9-bfa2-6eb2830cc241\") " Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.849976 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "38d44e0f-f488-4cc9-bfa2-6eb2830cc241" (UID: "38d44e0f-f488-4cc9-bfa2-6eb2830cc241"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.852271 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-scripts" (OuterVolumeSpecName: "scripts") pod "38d44e0f-f488-4cc9-bfa2-6eb2830cc241" (UID: "38d44e0f-f488-4cc9-bfa2-6eb2830cc241"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.852336 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "38d44e0f-f488-4cc9-bfa2-6eb2830cc241" (UID: "38d44e0f-f488-4cc9-bfa2-6eb2830cc241"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.852509 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-kube-api-access-2lhrm" (OuterVolumeSpecName: "kube-api-access-2lhrm") pod "38d44e0f-f488-4cc9-bfa2-6eb2830cc241" (UID: "38d44e0f-f488-4cc9-bfa2-6eb2830cc241"). InnerVolumeSpecName "kube-api-access-2lhrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.875207 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-4ztk6" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.875220 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-4ztk6" event={"ID":"38d44e0f-f488-4cc9-bfa2-6eb2830cc241","Type":"ContainerDied","Data":"e4c51c83b1797515bd50aeddc2192a95966d1b8a44f6214604a6cb8b839c688a"} Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.875278 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4c51c83b1797515bd50aeddc2192a95966d1b8a44f6214604a6cb8b839c688a" Nov 21 14:04:07 crc kubenswrapper[5133]: E1121 14:04:07.879292 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-m47ck" podUID="e5102115-b63e-42e2-8aae-1a68e7dda37c" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.886399 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-config-data" (OuterVolumeSpecName: "config-data") pod "38d44e0f-f488-4cc9-bfa2-6eb2830cc241" (UID: "38d44e0f-f488-4cc9-bfa2-6eb2830cc241"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.888179 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38d44e0f-f488-4cc9-bfa2-6eb2830cc241" (UID: "38d44e0f-f488-4cc9-bfa2-6eb2830cc241"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.941697 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.941813 5133 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.941826 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lhrm\" (UniqueName: \"kubernetes.io/projected/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-kube-api-access-2lhrm\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.941834 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.941844 5133 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:07 crc kubenswrapper[5133]: I1121 14:04:07.941852 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38d44e0f-f488-4cc9-bfa2-6eb2830cc241-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.845343 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-4ztk6"] Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.852256 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-4ztk6"] Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.945647 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-dcmgn"] Nov 21 14:04:08 crc kubenswrapper[5133]: E1121 14:04:08.946238 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccddde96-ed5a-4ebe-8f9d-9cb64115f434" containerName="init" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.946277 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccddde96-ed5a-4ebe-8f9d-9cb64115f434" containerName="init" Nov 21 14:04:08 crc kubenswrapper[5133]: E1121 14:04:08.946297 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38d44e0f-f488-4cc9-bfa2-6eb2830cc241" containerName="keystone-bootstrap" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.946303 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="38d44e0f-f488-4cc9-bfa2-6eb2830cc241" containerName="keystone-bootstrap" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.946532 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="38d44e0f-f488-4cc9-bfa2-6eb2830cc241" containerName="keystone-bootstrap" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.946593 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccddde96-ed5a-4ebe-8f9d-9cb64115f434" containerName="init" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.947255 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.951481 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.951764 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.951946 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.952726 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.955975 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bv5lp" Nov 21 14:04:08 crc kubenswrapper[5133]: I1121 14:04:08.963750 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dcmgn"] Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.064493 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-scripts\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.065594 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-fernet-keys\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.065864 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-config-data\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.066111 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22xl6\" (UniqueName: \"kubernetes.io/projected/7b1d299f-58dc-43de-a331-e7a15063fcd0-kube-api-access-22xl6\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.066226 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-credential-keys\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.066536 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-combined-ca-bundle\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.168431 5133 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-config-data\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.168504 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22xl6\" (UniqueName: \"kubernetes.io/projected/7b1d299f-58dc-43de-a331-e7a15063fcd0-kube-api-access-22xl6\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.168546 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-credential-keys\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.168604 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-combined-ca-bundle\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.168693 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-scripts\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.168720 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-fernet-keys\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.175292 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-credential-keys\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.175453 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-combined-ca-bundle\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.179885 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-fernet-keys\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.180094 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-scripts\") pod \"keystone-bootstrap-dcmgn\" (UID: 
\"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.180244 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-config-data\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.188373 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22xl6\" (UniqueName: \"kubernetes.io/projected/7b1d299f-58dc-43de-a331-e7a15063fcd0-kube-api-access-22xl6\") pod \"keystone-bootstrap-dcmgn\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: E1121 14:04:09.215328 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 21 14:04:09 crc kubenswrapper[5133]: E1121 14:04:09.215575 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hdp96,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePo
licy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-8cz2f_openstack(b61488dd-2db1-43b5-996b-43b76a5dbda6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:04:09 crc kubenswrapper[5133]: E1121 14:04:09.216778 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-8cz2f" podUID="b61488dd-2db1-43b5-996b-43b76a5dbda6" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.270225 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.395527 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.475471 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-sb\") pod \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.475555 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-nb\") pod \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.475601 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-config\") pod \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.475624 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-dns-svc\") pod \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.475746 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7qkn\" (UniqueName: \"kubernetes.io/projected/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-kube-api-access-c7qkn\") pod \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\" (UID: \"c34f5cc7-8dfa-48be-8392-0c4f8bb86208\") " Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.487922 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-kube-api-access-c7qkn" (OuterVolumeSpecName: "kube-api-access-c7qkn") pod "c34f5cc7-8dfa-48be-8392-0c4f8bb86208" (UID: "c34f5cc7-8dfa-48be-8392-0c4f8bb86208"). InnerVolumeSpecName "kube-api-access-c7qkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.553869 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-config" (OuterVolumeSpecName: "config") pod "c34f5cc7-8dfa-48be-8392-0c4f8bb86208" (UID: "c34f5cc7-8dfa-48be-8392-0c4f8bb86208"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.554868 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c34f5cc7-8dfa-48be-8392-0c4f8bb86208" (UID: "c34f5cc7-8dfa-48be-8392-0c4f8bb86208"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.566505 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c34f5cc7-8dfa-48be-8392-0c4f8bb86208" (UID: "c34f5cc7-8dfa-48be-8392-0c4f8bb86208"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.570346 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c34f5cc7-8dfa-48be-8392-0c4f8bb86208" (UID: "c34f5cc7-8dfa-48be-8392-0c4f8bb86208"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.578497 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.578546 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.578560 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.578575 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.578588 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7qkn\" (UniqueName: \"kubernetes.io/projected/c34f5cc7-8dfa-48be-8392-0c4f8bb86208-kube-api-access-c7qkn\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.736517 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dcmgn"] Nov 21 14:04:09 crc kubenswrapper[5133]: W1121 14:04:09.741294 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b1d299f_58dc_43de_a331_e7a15063fcd0.slice/crio-52818ef51aeccaec718d3d72c8bc2c60f8178447a60e14873df1747751cd5953 WatchSource:0}: Error finding container 52818ef51aeccaec718d3d72c8bc2c60f8178447a60e14873df1747751cd5953: Status 404 returned error can't find the container with id 52818ef51aeccaec718d3d72c8bc2c60f8178447a60e14873df1747751cd5953 Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.895458 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerStarted","Data":"625eee991bc80fa1055ba56cde17b404ac9090cc104cacfac4de0f0dad9fed89"} Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.898313 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-tvs68" event={"ID":"5782c62d-fdcd-43f5-9af1-c84968e501ed","Type":"ContainerStarted","Data":"dfaba6e19a83f28c8fa298433848a54c502472ca950ad72a5c4aef29e00da940"} Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.900209 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dcmgn" event={"ID":"7b1d299f-58dc-43de-a331-e7a15063fcd0","Type":"ContainerStarted","Data":"52818ef51aeccaec718d3d72c8bc2c60f8178447a60e14873df1747751cd5953"} Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.904078 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-bdzkx" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.904375 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-bdzkx" event={"ID":"c34f5cc7-8dfa-48be-8392-0c4f8bb86208","Type":"ContainerDied","Data":"0bf636752b964bd464281bee3e6c961b167323b1c91011d46c23d79228a6d7e0"} Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.904478 5133 scope.go:117] "RemoveContainer" containerID="d1cf7b7258cb63743b20f8c49032ed6e0de6e4fe067e736963cddf65b034e063" Nov 21 14:04:09 crc kubenswrapper[5133]: E1121 14:04:09.904967 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-8cz2f" podUID="b61488dd-2db1-43b5-996b-43b76a5dbda6" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.921593 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-tvs68" podStartSLOduration=2.480268226 podStartE2EDuration="25.921573598s" podCreationTimestamp="2025-11-21 14:03:44 +0000 UTC" firstStartedPulling="2025-11-21 14:03:45.719887345 +0000 UTC m=+1285.517719593" lastFinishedPulling="2025-11-21 14:04:09.161192717 +0000 UTC m=+1308.959024965" observedRunningTime="2025-11-21 14:04:09.920704465 +0000 UTC m=+1309.718536713" watchObservedRunningTime="2025-11-21 14:04:09.921573598 +0000 UTC m=+1309.719405846" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.937750 5133 scope.go:117] "RemoveContainer" containerID="23d1ee36620c45b12f8b35d52d2d00eea2e8cb970f3bdbbce06d63129d84e14f" Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.980492 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-bdzkx"] Nov 21 14:04:09 crc kubenswrapper[5133]: I1121 14:04:09.990932 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-bdzkx"] Nov 21 14:04:10 crc kubenswrapper[5133]: I1121 14:04:10.495269 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38d44e0f-f488-4cc9-bfa2-6eb2830cc241" path="/var/lib/kubelet/pods/38d44e0f-f488-4cc9-bfa2-6eb2830cc241/volumes" Nov 21 14:04:10 crc kubenswrapper[5133]: I1121 14:04:10.496718 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" path="/var/lib/kubelet/pods/c34f5cc7-8dfa-48be-8392-0c4f8bb86208/volumes" Nov 21 14:04:10 crc kubenswrapper[5133]: I1121 14:04:10.918190 5133 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dcmgn" event={"ID":"7b1d299f-58dc-43de-a331-e7a15063fcd0","Type":"ContainerStarted","Data":"1d8016066359fdca507d97f1f9ba81316b356e4b80486a94be57ffe845b858cb"} Nov 21 14:04:10 crc kubenswrapper[5133]: I1121 14:04:10.941673 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-dcmgn" podStartSLOduration=2.941645664 podStartE2EDuration="2.941645664s" podCreationTimestamp="2025-11-21 14:04:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:10.940488484 +0000 UTC m=+1310.738320732" watchObservedRunningTime="2025-11-21 14:04:10.941645664 +0000 UTC m=+1310.739477912" Nov 21 14:04:11 crc kubenswrapper[5133]: I1121 14:04:11.930649 5133 generic.go:334] "Generic (PLEG): container finished" podID="5782c62d-fdcd-43f5-9af1-c84968e501ed" containerID="dfaba6e19a83f28c8fa298433848a54c502472ca950ad72a5c4aef29e00da940" exitCode=0 Nov 21 14:04:11 crc kubenswrapper[5133]: I1121 14:04:11.931103 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-tvs68" event={"ID":"5782c62d-fdcd-43f5-9af1-c84968e501ed","Type":"ContainerDied","Data":"dfaba6e19a83f28c8fa298433848a54c502472ca950ad72a5c4aef29e00da940"} Nov 21 14:04:11 crc kubenswrapper[5133]: I1121 14:04:11.933451 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerStarted","Data":"aa9e852b69c878b1e79e5d9dc3a3cdc83f92cf8141013963bfee444cf47ef062"} Nov 21 14:04:13 crc kubenswrapper[5133]: I1121 14:04:13.957309 5133 generic.go:334] "Generic (PLEG): container finished" podID="7b1d299f-58dc-43de-a331-e7a15063fcd0" containerID="1d8016066359fdca507d97f1f9ba81316b356e4b80486a94be57ffe845b858cb" exitCode=0 Nov 21 14:04:13 crc kubenswrapper[5133]: I1121 14:04:13.957802 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dcmgn" event={"ID":"7b1d299f-58dc-43de-a331-e7a15063fcd0","Type":"ContainerDied","Data":"1d8016066359fdca507d97f1f9ba81316b356e4b80486a94be57ffe845b858cb"} Nov 21 14:04:13 crc kubenswrapper[5133]: I1121 14:04:13.961659 5133 generic.go:334] "Generic (PLEG): container finished" podID="76b580d8-fd56-40c0-a24a-1a3234d95ca6" containerID="af35a52559600ac679d265671a6b2484259a5617266456e79ef435f6c440aef9" exitCode=0 Nov 21 14:04:13 crc kubenswrapper[5133]: I1121 14:04:13.961719 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6blxq" event={"ID":"76b580d8-fd56-40c0-a24a-1a3234d95ca6","Type":"ContainerDied","Data":"af35a52559600ac679d265671a6b2484259a5617266456e79ef435f6c440aef9"} Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.053367 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-6blxq" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.058200 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-tvs68" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.065657 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.068744 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-combined-ca-bundle\") pod \"5782c62d-fdcd-43f5-9af1-c84968e501ed\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.068823 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-scripts\") pod \"5782c62d-fdcd-43f5-9af1-c84968e501ed\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.068904 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-combined-ca-bundle\") pod \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.068948 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-config-data\") pod \"5782c62d-fdcd-43f5-9af1-c84968e501ed\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.068992 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-db-sync-config-data\") pod \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.069054 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-config-data\") pod \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.069211 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mr576\" (UniqueName: \"kubernetes.io/projected/76b580d8-fd56-40c0-a24a-1a3234d95ca6-kube-api-access-mr576\") pod \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\" (UID: \"76b580d8-fd56-40c0-a24a-1a3234d95ca6\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.069238 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mqdd\" (UniqueName: \"kubernetes.io/projected/5782c62d-fdcd-43f5-9af1-c84968e501ed-kube-api-access-2mqdd\") pod \"5782c62d-fdcd-43f5-9af1-c84968e501ed\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.069342 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5782c62d-fdcd-43f5-9af1-c84968e501ed-logs\") pod \"5782c62d-fdcd-43f5-9af1-c84968e501ed\" (UID: \"5782c62d-fdcd-43f5-9af1-c84968e501ed\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.070093 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5782c62d-fdcd-43f5-9af1-c84968e501ed-logs" (OuterVolumeSpecName: "logs") pod "5782c62d-fdcd-43f5-9af1-c84968e501ed" (UID: 
"5782c62d-fdcd-43f5-9af1-c84968e501ed"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.114606 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5782c62d-fdcd-43f5-9af1-c84968e501ed-kube-api-access-2mqdd" (OuterVolumeSpecName: "kube-api-access-2mqdd") pod "5782c62d-fdcd-43f5-9af1-c84968e501ed" (UID: "5782c62d-fdcd-43f5-9af1-c84968e501ed"). InnerVolumeSpecName "kube-api-access-2mqdd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.114719 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76b580d8-fd56-40c0-a24a-1a3234d95ca6-kube-api-access-mr576" (OuterVolumeSpecName: "kube-api-access-mr576") pod "76b580d8-fd56-40c0-a24a-1a3234d95ca6" (UID: "76b580d8-fd56-40c0-a24a-1a3234d95ca6"). InnerVolumeSpecName "kube-api-access-mr576". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.119985 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-scripts" (OuterVolumeSpecName: "scripts") pod "5782c62d-fdcd-43f5-9af1-c84968e501ed" (UID: "5782c62d-fdcd-43f5-9af1-c84968e501ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.122027 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "76b580d8-fd56-40c0-a24a-1a3234d95ca6" (UID: "76b580d8-fd56-40c0-a24a-1a3234d95ca6"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.143063 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5782c62d-fdcd-43f5-9af1-c84968e501ed" (UID: "5782c62d-fdcd-43f5-9af1-c84968e501ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.150838 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-config-data" (OuterVolumeSpecName: "config-data") pod "5782c62d-fdcd-43f5-9af1-c84968e501ed" (UID: "5782c62d-fdcd-43f5-9af1-c84968e501ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.152406 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "76b580d8-fd56-40c0-a24a-1a3234d95ca6" (UID: "76b580d8-fd56-40c0-a24a-1a3234d95ca6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.171404 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-config-data\") pod \"7b1d299f-58dc-43de-a331-e7a15063fcd0\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.171483 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-fernet-keys\") pod \"7b1d299f-58dc-43de-a331-e7a15063fcd0\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.171507 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-combined-ca-bundle\") pod \"7b1d299f-58dc-43de-a331-e7a15063fcd0\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.171612 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-scripts\") pod \"7b1d299f-58dc-43de-a331-e7a15063fcd0\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.171675 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-credential-keys\") pod \"7b1d299f-58dc-43de-a331-e7a15063fcd0\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.171695 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22xl6\" (UniqueName: \"kubernetes.io/projected/7b1d299f-58dc-43de-a331-e7a15063fcd0-kube-api-access-22xl6\") pod \"7b1d299f-58dc-43de-a331-e7a15063fcd0\" (UID: \"7b1d299f-58dc-43de-a331-e7a15063fcd0\") " Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.172444 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mr576\" (UniqueName: \"kubernetes.io/projected/76b580d8-fd56-40c0-a24a-1a3234d95ca6-kube-api-access-mr576\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.172463 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mqdd\" (UniqueName: \"kubernetes.io/projected/5782c62d-fdcd-43f5-9af1-c84968e501ed-kube-api-access-2mqdd\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.172473 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5782c62d-fdcd-43f5-9af1-c84968e501ed-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.172485 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.172496 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 
14:04:17.172507 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.172526 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5782c62d-fdcd-43f5-9af1-c84968e501ed-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.172545 5133 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.175715 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7b1d299f-58dc-43de-a331-e7a15063fcd0" (UID: "7b1d299f-58dc-43de-a331-e7a15063fcd0"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.180435 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b1d299f-58dc-43de-a331-e7a15063fcd0-kube-api-access-22xl6" (OuterVolumeSpecName: "kube-api-access-22xl6") pod "7b1d299f-58dc-43de-a331-e7a15063fcd0" (UID: "7b1d299f-58dc-43de-a331-e7a15063fcd0"). InnerVolumeSpecName "kube-api-access-22xl6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.185448 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-scripts" (OuterVolumeSpecName: "scripts") pod "7b1d299f-58dc-43de-a331-e7a15063fcd0" (UID: "7b1d299f-58dc-43de-a331-e7a15063fcd0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.185982 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7b1d299f-58dc-43de-a331-e7a15063fcd0" (UID: "7b1d299f-58dc-43de-a331-e7a15063fcd0"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.203411 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-config-data" (OuterVolumeSpecName: "config-data") pod "7b1d299f-58dc-43de-a331-e7a15063fcd0" (UID: "7b1d299f-58dc-43de-a331-e7a15063fcd0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.216142 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-config-data" (OuterVolumeSpecName: "config-data") pod "76b580d8-fd56-40c0-a24a-1a3234d95ca6" (UID: "76b580d8-fd56-40c0-a24a-1a3234d95ca6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.216632 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b1d299f-58dc-43de-a331-e7a15063fcd0" (UID: "7b1d299f-58dc-43de-a331-e7a15063fcd0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.274229 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.274294 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/76b580d8-fd56-40c0-a24a-1a3234d95ca6-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.274307 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.274321 5133 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.274330 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.274343 5133 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7b1d299f-58dc-43de-a331-e7a15063fcd0-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:17 crc kubenswrapper[5133]: I1121 14:04:17.274373 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22xl6\" (UniqueName: \"kubernetes.io/projected/7b1d299f-58dc-43de-a331-e7a15063fcd0-kube-api-access-22xl6\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.006356 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerStarted","Data":"dc316e1cedd0ed9d7085a3a50a7fd9974b26effca798e7c0d6309345c02ddbf7"} Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.008269 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-tvs68" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.008284 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-tvs68" event={"ID":"5782c62d-fdcd-43f5-9af1-c84968e501ed","Type":"ContainerDied","Data":"7ba367f29902ff91c41d5f9dc94241887aed309de045205f754d203e86089e58"} Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.008361 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7ba367f29902ff91c41d5f9dc94241887aed309de045205f754d203e86089e58" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.010792 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dcmgn" event={"ID":"7b1d299f-58dc-43de-a331-e7a15063fcd0","Type":"ContainerDied","Data":"52818ef51aeccaec718d3d72c8bc2c60f8178447a60e14873df1747751cd5953"} Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.010836 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52818ef51aeccaec718d3d72c8bc2c60f8178447a60e14873df1747751cd5953" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.010921 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dcmgn" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.014239 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-6blxq" event={"ID":"76b580d8-fd56-40c0-a24a-1a3234d95ca6","Type":"ContainerDied","Data":"6d9538441e981f10dc0bf28853063e4e5b3f0233709e0df3963660e88a5ce489"} Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.014287 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d9538441e981f10dc0bf28853063e4e5b3f0233709e0df3963660e88a5ce489" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.014371 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-6blxq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.283693 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f84bff9cd-skfwq"] Nov 21 14:04:18 crc kubenswrapper[5133]: E1121 14:04:18.284268 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerName="dnsmasq-dns" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.284287 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerName="dnsmasq-dns" Nov 21 14:04:18 crc kubenswrapper[5133]: E1121 14:04:18.284303 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerName="init" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.284310 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerName="init" Nov 21 14:04:18 crc kubenswrapper[5133]: E1121 14:04:18.284323 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5782c62d-fdcd-43f5-9af1-c84968e501ed" containerName="placement-db-sync" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.284334 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="5782c62d-fdcd-43f5-9af1-c84968e501ed" containerName="placement-db-sync" Nov 21 14:04:18 crc kubenswrapper[5133]: E1121 14:04:18.284352 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76b580d8-fd56-40c0-a24a-1a3234d95ca6" containerName="glance-db-sync" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.284360 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="76b580d8-fd56-40c0-a24a-1a3234d95ca6" containerName="glance-db-sync" Nov 21 14:04:18 crc kubenswrapper[5133]: E1121 14:04:18.284372 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b1d299f-58dc-43de-a331-e7a15063fcd0" containerName="keystone-bootstrap" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.284396 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b1d299f-58dc-43de-a331-e7a15063fcd0" containerName="keystone-bootstrap" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.284585 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="5782c62d-fdcd-43f5-9af1-c84968e501ed" containerName="placement-db-sync" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.284614 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b1d299f-58dc-43de-a331-e7a15063fcd0" containerName="keystone-bootstrap" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.284635 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c34f5cc7-8dfa-48be-8392-0c4f8bb86208" containerName="dnsmasq-dns" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.284653 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="76b580d8-fd56-40c0-a24a-1a3234d95ca6" containerName="glance-db-sync" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.285446 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.287602 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-bv5lp" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.288283 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.288458 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.289024 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.289276 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.292594 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-55d9467df6-xwjg4"] Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.294517 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.300236 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.300721 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-8jkmh" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.300958 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.301361 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.301530 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307082 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a76b0db6-05bc-45f8-a7cb-38bd7280d541-logs\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307128 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-scripts\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307172 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-public-tls-certs\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307207 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-config-data\") pod 
\"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307229 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-combined-ca-bundle\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307261 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-internal-tls-certs\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307292 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-fernet-keys\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307328 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-combined-ca-bundle\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307354 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8htj8\" (UniqueName: \"kubernetes.io/projected/a76b0db6-05bc-45f8-a7cb-38bd7280d541-kube-api-access-8htj8\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307380 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nlkp\" (UniqueName: \"kubernetes.io/projected/b0c7d424-6218-4da5-ae54-203c06f3d1be-kube-api-access-5nlkp\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307400 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-credential-keys\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307425 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-internal-tls-certs\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307448 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-scripts\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307467 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-config-data\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.307510 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-public-tls-certs\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.309231 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-55d9467df6-xwjg4"] Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.316542 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.353577 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f84bff9cd-skfwq"] Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408441 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-public-tls-certs\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408507 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a76b0db6-05bc-45f8-a7cb-38bd7280d541-logs\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408542 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-scripts\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408579 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-public-tls-certs\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408607 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-config-data\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408632 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-combined-ca-bundle\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408676 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-internal-tls-certs\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408713 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-fernet-keys\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408752 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-combined-ca-bundle\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408779 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8htj8\" (UniqueName: \"kubernetes.io/projected/a76b0db6-05bc-45f8-a7cb-38bd7280d541-kube-api-access-8htj8\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408807 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nlkp\" (UniqueName: \"kubernetes.io/projected/b0c7d424-6218-4da5-ae54-203c06f3d1be-kube-api-access-5nlkp\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408837 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-credential-keys\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408872 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-internal-tls-certs\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408893 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-scripts\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.408921 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-config-data\") pod 
\"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.421609 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a76b0db6-05bc-45f8-a7cb-38bd7280d541-logs\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.431081 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-config-data\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.430566 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-config-data\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.436193 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-scripts\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.437197 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-scripts\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.438447 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-combined-ca-bundle\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.445521 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-fernet-keys\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.447056 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-public-tls-certs\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.447476 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-internal-tls-certs\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.447948 5133 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-combined-ca-bundle\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.449727 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-credential-keys\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.460535 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b0c7d424-6218-4da5-ae54-203c06f3d1be-internal-tls-certs\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.468356 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8htj8\" (UniqueName: \"kubernetes.io/projected/a76b0db6-05bc-45f8-a7cb-38bd7280d541-kube-api-access-8htj8\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.473637 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a76b0db6-05bc-45f8-a7cb-38bd7280d541-public-tls-certs\") pod \"placement-55d9467df6-xwjg4\" (UID: \"a76b0db6-05bc-45f8-a7cb-38bd7280d541\") " pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.500710 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nlkp\" (UniqueName: \"kubernetes.io/projected/b0c7d424-6218-4da5-ae54-203c06f3d1be-kube-api-access-5nlkp\") pod \"keystone-f84bff9cd-skfwq\" (UID: \"b0c7d424-6218-4da5-ae54-203c06f3d1be\") " pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.595280 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-49x6h"] Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.597437 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.607736 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-49x6h"] Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.629497 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgxt5\" (UniqueName: \"kubernetes.io/projected/8d689e79-315a-499a-a76e-7e4a79cf977c-kube-api-access-wgxt5\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.629591 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-nb\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.629644 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-config\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.629677 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-dns-svc\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.629717 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-sb\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.637515 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.658705 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.731577 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-config\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.731630 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-dns-svc\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.731668 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-sb\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.731742 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgxt5\" (UniqueName: \"kubernetes.io/projected/8d689e79-315a-499a-a76e-7e4a79cf977c-kube-api-access-wgxt5\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.731787 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-nb\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.739015 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-nb\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.741487 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-dns-svc\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.742214 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-config\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.747659 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-sb\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.776847 
5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgxt5\" (UniqueName: \"kubernetes.io/projected/8d689e79-315a-499a-a76e-7e4a79cf977c-kube-api-access-wgxt5\") pod \"dnsmasq-dns-5b6dbdb6f5-49x6h\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:18 crc kubenswrapper[5133]: I1121 14:04:18.917866 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:19 crc kubenswrapper[5133]: I1121 14:04:19.264851 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-55d9467df6-xwjg4"] Nov 21 14:04:19 crc kubenswrapper[5133]: W1121 14:04:19.283810 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda76b0db6_05bc_45f8_a7cb_38bd7280d541.slice/crio-9cfc312b7639e320df8ace579ac4df5f68a11d9e2a4284a33cac82586483648e WatchSource:0}: Error finding container 9cfc312b7639e320df8ace579ac4df5f68a11d9e2a4284a33cac82586483648e: Status 404 returned error can't find the container with id 9cfc312b7639e320df8ace579ac4df5f68a11d9e2a4284a33cac82586483648e Nov 21 14:04:19 crc kubenswrapper[5133]: I1121 14:04:19.303096 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f84bff9cd-skfwq"] Nov 21 14:04:19 crc kubenswrapper[5133]: W1121 14:04:19.312973 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0c7d424_6218_4da5_ae54_203c06f3d1be.slice/crio-4bf087e882e5f51795ead763b23d830c5758a174f2403c6e5a262dd0009d57de WatchSource:0}: Error finding container 4bf087e882e5f51795ead763b23d830c5758a174f2403c6e5a262dd0009d57de: Status 404 returned error can't find the container with id 4bf087e882e5f51795ead763b23d830c5758a174f2403c6e5a262dd0009d57de Nov 21 14:04:19 crc kubenswrapper[5133]: I1121 14:04:19.552436 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-49x6h"] Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.081609 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-55d9467df6-xwjg4" event={"ID":"a76b0db6-05bc-45f8-a7cb-38bd7280d541","Type":"ContainerStarted","Data":"3bebf1f2ce6e411e662485ecacd90c8d2ca15f79f7b4234666182f7f5ef2459e"} Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.081676 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-55d9467df6-xwjg4" event={"ID":"a76b0db6-05bc-45f8-a7cb-38bd7280d541","Type":"ContainerStarted","Data":"c75523d56afd21538fd96b712989336ba5aac7b9da21757d1f27ed5a85b5653c"} Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.081697 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-55d9467df6-xwjg4" event={"ID":"a76b0db6-05bc-45f8-a7cb-38bd7280d541","Type":"ContainerStarted","Data":"9cfc312b7639e320df8ace579ac4df5f68a11d9e2a4284a33cac82586483648e"} Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.081821 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.087585 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f84bff9cd-skfwq" event={"ID":"b0c7d424-6218-4da5-ae54-203c06f3d1be","Type":"ContainerStarted","Data":"99b36e6c215109a6979304c4bb215c52e9e037e84b5079845aeb048fd35189b9"} Nov 21 14:04:20 crc kubenswrapper[5133]: 
I1121 14:04:20.087667 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f84bff9cd-skfwq" event={"ID":"b0c7d424-6218-4da5-ae54-203c06f3d1be","Type":"ContainerStarted","Data":"4bf087e882e5f51795ead763b23d830c5758a174f2403c6e5a262dd0009d57de"} Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.087700 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.095027 5133 generic.go:334] "Generic (PLEG): container finished" podID="8d689e79-315a-499a-a76e-7e4a79cf977c" containerID="739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89" exitCode=0 Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.095084 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" event={"ID":"8d689e79-315a-499a-a76e-7e4a79cf977c","Type":"ContainerDied","Data":"739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89"} Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.095112 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" event={"ID":"8d689e79-315a-499a-a76e-7e4a79cf977c","Type":"ContainerStarted","Data":"1bd7742c497547f249ec694ca3f190e34499c2df351e3c1f674365a27894bde5"} Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.149565 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-55d9467df6-xwjg4" podStartSLOduration=2.149533067 podStartE2EDuration="2.149533067s" podCreationTimestamp="2025-11-21 14:04:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:20.117037889 +0000 UTC m=+1319.914870157" watchObservedRunningTime="2025-11-21 14:04:20.149533067 +0000 UTC m=+1319.947365325" Nov 21 14:04:20 crc kubenswrapper[5133]: I1121 14:04:20.209940 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-f84bff9cd-skfwq" podStartSLOduration=2.20991825 podStartE2EDuration="2.20991825s" podCreationTimestamp="2025-11-21 14:04:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:20.143606269 +0000 UTC m=+1319.941438527" watchObservedRunningTime="2025-11-21 14:04:20.20991825 +0000 UTC m=+1320.007750498" Nov 21 14:04:21 crc kubenswrapper[5133]: I1121 14:04:21.112509 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" event={"ID":"8d689e79-315a-499a-a76e-7e4a79cf977c","Type":"ContainerStarted","Data":"da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6"} Nov 21 14:04:21 crc kubenswrapper[5133]: I1121 14:04:21.113783 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:21 crc kubenswrapper[5133]: I1121 14:04:21.141155 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" podStartSLOduration=3.141121773 podStartE2EDuration="3.141121773s" podCreationTimestamp="2025-11-21 14:04:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:21.135263857 +0000 UTC m=+1320.933096105" watchObservedRunningTime="2025-11-21 14:04:21.141121773 +0000 UTC m=+1320.938954021" Nov 21 14:04:22 crc 
kubenswrapper[5133]: I1121 14:04:22.123160 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:24 crc kubenswrapper[5133]: I1121 14:04:24.151048 5133 generic.go:334] "Generic (PLEG): container finished" podID="944fb1ae-3bf4-479a-863d-62867fdf5b82" containerID="1b012093ae77969b0a3c7baae1bd9fe3b1c9a037d5139292c62a7d2f76c2c18e" exitCode=0 Nov 21 14:04:24 crc kubenswrapper[5133]: I1121 14:04:24.151164 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-26kdp" event={"ID":"944fb1ae-3bf4-479a-863d-62867fdf5b82","Type":"ContainerDied","Data":"1b012093ae77969b0a3c7baae1bd9fe3b1c9a037d5139292c62a7d2f76c2c18e"} Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.010764 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-26kdp" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.150248 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-config\") pod \"944fb1ae-3bf4-479a-863d-62867fdf5b82\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.150308 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-combined-ca-bundle\") pod \"944fb1ae-3bf4-479a-863d-62867fdf5b82\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.150361 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nqc5\" (UniqueName: \"kubernetes.io/projected/944fb1ae-3bf4-479a-863d-62867fdf5b82-kube-api-access-8nqc5\") pod \"944fb1ae-3bf4-479a-863d-62867fdf5b82\" (UID: \"944fb1ae-3bf4-479a-863d-62867fdf5b82\") " Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.155338 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/944fb1ae-3bf4-479a-863d-62867fdf5b82-kube-api-access-8nqc5" (OuterVolumeSpecName: "kube-api-access-8nqc5") pod "944fb1ae-3bf4-479a-863d-62867fdf5b82" (UID: "944fb1ae-3bf4-479a-863d-62867fdf5b82"). InnerVolumeSpecName "kube-api-access-8nqc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.175777 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-26kdp" event={"ID":"944fb1ae-3bf4-479a-863d-62867fdf5b82","Type":"ContainerDied","Data":"d8790ab86076f38b1697628d6ce947bf594edbb2832a9fb0045bd7dad402868d"} Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.175823 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8790ab86076f38b1697628d6ce947bf594edbb2832a9fb0045bd7dad402868d" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.175892 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-26kdp" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.183792 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "944fb1ae-3bf4-479a-863d-62867fdf5b82" (UID: "944fb1ae-3bf4-479a-863d-62867fdf5b82"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.193440 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-config" (OuterVolumeSpecName: "config") pod "944fb1ae-3bf4-479a-863d-62867fdf5b82" (UID: "944fb1ae-3bf4-479a-863d-62867fdf5b82"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.253846 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.253921 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944fb1ae-3bf4-479a-863d-62867fdf5b82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.253943 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8nqc5\" (UniqueName: \"kubernetes.io/projected/944fb1ae-3bf4-479a-863d-62867fdf5b82-kube-api-access-8nqc5\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.415655 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-49x6h"] Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.421382 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" podUID="8d689e79-315a-499a-a76e-7e4a79cf977c" containerName="dnsmasq-dns" containerID="cri-o://da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6" gracePeriod=10 Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.432259 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.477877 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-q57jg"] Nov 21 14:04:26 crc kubenswrapper[5133]: E1121 14:04:26.478268 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="944fb1ae-3bf4-479a-863d-62867fdf5b82" containerName="neutron-db-sync" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.478291 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="944fb1ae-3bf4-479a-863d-62867fdf5b82" containerName="neutron-db-sync" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.478476 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="944fb1ae-3bf4-479a-863d-62867fdf5b82" containerName="neutron-db-sync" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.479528 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.509660 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-q57jg"] Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.566019 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bgkt\" (UniqueName: \"kubernetes.io/projected/aa798184-6ef5-4fe7-8dd7-6454c7058f49-kube-api-access-7bgkt\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.566090 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-config\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.566139 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-sb\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.566315 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-dns-svc\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.566349 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-nb\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.667781 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bgkt\" (UniqueName: \"kubernetes.io/projected/aa798184-6ef5-4fe7-8dd7-6454c7058f49-kube-api-access-7bgkt\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.667849 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-config\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.667893 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-sb\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.668030 5133 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-dns-svc\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.668066 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-nb\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.669492 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-config\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.670193 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-sb\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.670855 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-dns-svc\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.671489 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-nb\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.676007 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-59475798db-b6bck"] Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.680932 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.684354 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-5s6lj" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.684581 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.684808 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.684971 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.690407 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bgkt\" (UniqueName: \"kubernetes.io/projected/aa798184-6ef5-4fe7-8dd7-6454c7058f49-kube-api-access-7bgkt\") pod \"dnsmasq-dns-5f66db59b9-q57jg\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.691168 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-59475798db-b6bck"] Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.769680 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-httpd-config\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.769726 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-ovndb-tls-certs\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.769785 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-combined-ca-bundle\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.769882 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6vg9\" (UniqueName: \"kubernetes.io/projected/b5beef58-6317-4739-9049-bb385e29db6c-kube-api-access-k6vg9\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.769932 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-config\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.881446 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-config\") pod \"neutron-59475798db-b6bck\" (UID: 
\"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.881626 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-httpd-config\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.881658 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-ovndb-tls-certs\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.882584 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-combined-ca-bundle\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.882687 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6vg9\" (UniqueName: \"kubernetes.io/projected/b5beef58-6317-4739-9049-bb385e29db6c-kube-api-access-k6vg9\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.886644 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-httpd-config\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.886758 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-ovndb-tls-certs\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.888091 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-config\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.892479 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-combined-ca-bundle\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.902967 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6vg9\" (UniqueName: \"kubernetes.io/projected/b5beef58-6317-4739-9049-bb385e29db6c-kube-api-access-k6vg9\") pod \"neutron-59475798db-b6bck\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:26 crc kubenswrapper[5133]: I1121 14:04:26.922145 5133 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.006862 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.196391 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.251987 5133 generic.go:334] "Generic (PLEG): container finished" podID="8d689e79-315a-499a-a76e-7e4a79cf977c" containerID="da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6" exitCode=0 Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.252660 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" event={"ID":"8d689e79-315a-499a-a76e-7e4a79cf977c","Type":"ContainerDied","Data":"da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6"} Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.252696 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" event={"ID":"8d689e79-315a-499a-a76e-7e4a79cf977c","Type":"ContainerDied","Data":"1bd7742c497547f249ec694ca3f190e34499c2df351e3c1f674365a27894bde5"} Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.252734 5133 scope.go:117] "RemoveContainer" containerID="da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.252754 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b6dbdb6f5-49x6h" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.273641 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-m47ck" event={"ID":"e5102115-b63e-42e2-8aae-1a68e7dda37c","Type":"ContainerStarted","Data":"d252e144cec13d98268fe83087b99764332e2dfbdac245e47d37e15c587dc6de"} Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.290265 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-sb\") pod \"8d689e79-315a-499a-a76e-7e4a79cf977c\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.291244 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-nb\") pod \"8d689e79-315a-499a-a76e-7e4a79cf977c\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.291326 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-config\") pod \"8d689e79-315a-499a-a76e-7e4a79cf977c\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.291482 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgxt5\" (UniqueName: \"kubernetes.io/projected/8d689e79-315a-499a-a76e-7e4a79cf977c-kube-api-access-wgxt5\") pod \"8d689e79-315a-499a-a76e-7e4a79cf977c\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.291525 5133 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-dns-svc\") pod \"8d689e79-315a-499a-a76e-7e4a79cf977c\" (UID: \"8d689e79-315a-499a-a76e-7e4a79cf977c\") " Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.298667 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerStarted","Data":"ea3fc9fd20a2cda56e5e511dd519affc6c9d9bf7f973aca62480718ee185f18f"} Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.298949 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="ceilometer-central-agent" containerID="cri-o://625eee991bc80fa1055ba56cde17b404ac9090cc104cacfac4de0f0dad9fed89" gracePeriod=30 Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.299359 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.299763 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="proxy-httpd" containerID="cri-o://ea3fc9fd20a2cda56e5e511dd519affc6c9d9bf7f973aca62480718ee185f18f" gracePeriod=30 Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.299850 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="sg-core" containerID="cri-o://dc316e1cedd0ed9d7085a3a50a7fd9974b26effca798e7c0d6309345c02ddbf7" gracePeriod=30 Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.299897 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="ceilometer-notification-agent" containerID="cri-o://aa9e852b69c878b1e79e5d9dc3a3cdc83f92cf8141013963bfee444cf47ef062" gracePeriod=30 Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.306250 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-m47ck" podStartSLOduration=2.8881528210000003 podStartE2EDuration="43.30622556s" podCreationTimestamp="2025-11-21 14:03:44 +0000 UTC" firstStartedPulling="2025-11-21 14:03:45.644408209 +0000 UTC m=+1285.442240457" lastFinishedPulling="2025-11-21 14:04:26.062480928 +0000 UTC m=+1325.860313196" observedRunningTime="2025-11-21 14:04:27.297710003 +0000 UTC m=+1327.095542251" watchObservedRunningTime="2025-11-21 14:04:27.30622556 +0000 UTC m=+1327.104057808" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.326347 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d689e79-315a-499a-a76e-7e4a79cf977c-kube-api-access-wgxt5" (OuterVolumeSpecName: "kube-api-access-wgxt5") pod "8d689e79-315a-499a-a76e-7e4a79cf977c" (UID: "8d689e79-315a-499a-a76e-7e4a79cf977c"). InnerVolumeSpecName "kube-api-access-wgxt5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.373126 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.028564791 podStartE2EDuration="43.373100197s" podCreationTimestamp="2025-11-21 14:03:44 +0000 UTC" firstStartedPulling="2025-11-21 14:03:45.73395859 +0000 UTC m=+1285.531790838" lastFinishedPulling="2025-11-21 14:04:26.078493996 +0000 UTC m=+1325.876326244" observedRunningTime="2025-11-21 14:04:27.340453225 +0000 UTC m=+1327.138285473" watchObservedRunningTime="2025-11-21 14:04:27.373100197 +0000 UTC m=+1327.170932445" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.377935 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8d689e79-315a-499a-a76e-7e4a79cf977c" (UID: "8d689e79-315a-499a-a76e-7e4a79cf977c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.378117 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8d689e79-315a-499a-a76e-7e4a79cf977c" (UID: "8d689e79-315a-499a-a76e-7e4a79cf977c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.390471 5133 scope.go:117] "RemoveContainer" containerID="739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.392866 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-config" (OuterVolumeSpecName: "config") pod "8d689e79-315a-499a-a76e-7e4a79cf977c" (UID: "8d689e79-315a-499a-a76e-7e4a79cf977c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.405491 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.405541 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.405552 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.405562 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgxt5\" (UniqueName: \"kubernetes.io/projected/8d689e79-315a-499a-a76e-7e4a79cf977c-kube-api-access-wgxt5\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.429955 5133 scope.go:117] "RemoveContainer" containerID="da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.431382 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8d689e79-315a-499a-a76e-7e4a79cf977c" (UID: "8d689e79-315a-499a-a76e-7e4a79cf977c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:27 crc kubenswrapper[5133]: E1121 14:04:27.431838 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6\": container with ID starting with da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6 not found: ID does not exist" containerID="da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.431876 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6"} err="failed to get container status \"da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6\": rpc error: code = NotFound desc = could not find container \"da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6\": container with ID starting with da6b9213a48da03b81dd6062a9b6ffa3b66f652a03079f4b178cbf2431cfe5a6 not found: ID does not exist" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.431901 5133 scope.go:117] "RemoveContainer" containerID="739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89" Nov 21 14:04:27 crc kubenswrapper[5133]: E1121 14:04:27.432203 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89\": container with ID starting with 739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89 not found: ID does not exist" containerID="739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.432222 5133 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89"} err="failed to get container status \"739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89\": rpc error: code = NotFound desc = could not find container \"739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89\": container with ID starting with 739c3ec9d7d35f572e69ca607334f41b9cb4f2834f4266965e36d3f773b9ae89 not found: ID does not exist" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.478317 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-q57jg"] Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.507180 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d689e79-315a-499a-a76e-7e4a79cf977c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.597092 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-49x6h"] Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.611210 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b6dbdb6f5-49x6h"] Nov 21 14:04:27 crc kubenswrapper[5133]: I1121 14:04:27.731303 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-59475798db-b6bck"] Nov 21 14:04:27 crc kubenswrapper[5133]: W1121 14:04:27.736800 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5beef58_6317_4739_9049_bb385e29db6c.slice/crio-2c559cc42e14a3c1cf6b828f27da77ed04fc9138115b34056acaf949e64bb8d4 WatchSource:0}: Error finding container 2c559cc42e14a3c1cf6b828f27da77ed04fc9138115b34056acaf949e64bb8d4: Status 404 returned error can't find the container with id 2c559cc42e14a3c1cf6b828f27da77ed04fc9138115b34056acaf949e64bb8d4 Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.312153 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-59475798db-b6bck" event={"ID":"b5beef58-6317-4739-9049-bb385e29db6c","Type":"ContainerStarted","Data":"2c559cc42e14a3c1cf6b828f27da77ed04fc9138115b34056acaf949e64bb8d4"} Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.315595 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8cz2f" event={"ID":"b61488dd-2db1-43b5-996b-43b76a5dbda6","Type":"ContainerStarted","Data":"3494c50fa90e29715228d64226f1a7d95710c4cb930d522c966e254df0e38021"} Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.318165 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" event={"ID":"aa798184-6ef5-4fe7-8dd7-6454c7058f49","Type":"ContainerStarted","Data":"e8c507abbf7d084a45d8e10d6b3deeab13496d6dacc1664c5854acf7480561db"} Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.322901 5133 generic.go:334] "Generic (PLEG): container finished" podID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerID="ea3fc9fd20a2cda56e5e511dd519affc6c9d9bf7f973aca62480718ee185f18f" exitCode=0 Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.322938 5133 generic.go:334] "Generic (PLEG): container finished" podID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerID="dc316e1cedd0ed9d7085a3a50a7fd9974b26effca798e7c0d6309345c02ddbf7" exitCode=2 Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.322959 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerDied","Data":"ea3fc9fd20a2cda56e5e511dd519affc6c9d9bf7f973aca62480718ee185f18f"} Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.322988 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerDied","Data":"dc316e1cedd0ed9d7085a3a50a7fd9974b26effca798e7c0d6309345c02ddbf7"} Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.342950 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-8cz2f" podStartSLOduration=3.535917753 podStartE2EDuration="44.342929201s" podCreationTimestamp="2025-11-21 14:03:44 +0000 UTC" firstStartedPulling="2025-11-21 14:03:45.25546813 +0000 UTC m=+1285.053300378" lastFinishedPulling="2025-11-21 14:04:26.062479578 +0000 UTC m=+1325.860311826" observedRunningTime="2025-11-21 14:04:28.337288691 +0000 UTC m=+1328.135120949" watchObservedRunningTime="2025-11-21 14:04:28.342929201 +0000 UTC m=+1328.140761449" Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.470312 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d689e79-315a-499a-a76e-7e4a79cf977c" path="/var/lib/kubelet/pods/8d689e79-315a-499a-a76e-7e4a79cf977c/volumes" Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.988086 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6f8dcf7d6f-7dxkg"] Nov 21 14:04:28 crc kubenswrapper[5133]: E1121 14:04:28.988617 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d689e79-315a-499a-a76e-7e4a79cf977c" containerName="init" Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.988639 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d689e79-315a-499a-a76e-7e4a79cf977c" containerName="init" Nov 21 14:04:28 crc kubenswrapper[5133]: E1121 14:04:28.988670 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d689e79-315a-499a-a76e-7e4a79cf977c" containerName="dnsmasq-dns" Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.988679 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d689e79-315a-499a-a76e-7e4a79cf977c" containerName="dnsmasq-dns" Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.988866 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d689e79-315a-499a-a76e-7e4a79cf977c" containerName="dnsmasq-dns" Nov 21 14:04:28 crc kubenswrapper[5133]: I1121 14:04:28.989973 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.000330 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6f8dcf7d6f-7dxkg"] Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.000760 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.001043 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.139700 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmsv7\" (UniqueName: \"kubernetes.io/projected/ac4cef87-965c-4905-b980-0dd6591d9317-kube-api-access-kmsv7\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.140032 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-combined-ca-bundle\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.140286 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-internal-tls-certs\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.140368 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-httpd-config\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.140486 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-public-tls-certs\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.140560 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-config\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.140704 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-ovndb-tls-certs\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.242641 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmsv7\" (UniqueName: 
\"kubernetes.io/projected/ac4cef87-965c-4905-b980-0dd6591d9317-kube-api-access-kmsv7\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.243274 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-combined-ca-bundle\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.244305 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-internal-tls-certs\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.244457 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-httpd-config\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.244600 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-public-tls-certs\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.244719 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-config\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.244884 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-ovndb-tls-certs\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.249604 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-combined-ca-bundle\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.251097 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-config\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.251178 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-ovndb-tls-certs\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " 
pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.252331 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-httpd-config\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.253335 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-internal-tls-certs\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.254508 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac4cef87-965c-4905-b980-0dd6591d9317-public-tls-certs\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.274053 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmsv7\" (UniqueName: \"kubernetes.io/projected/ac4cef87-965c-4905-b980-0dd6591d9317-kube-api-access-kmsv7\") pod \"neutron-6f8dcf7d6f-7dxkg\" (UID: \"ac4cef87-965c-4905-b980-0dd6591d9317\") " pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.316239 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.333506 5133 generic.go:334] "Generic (PLEG): container finished" podID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerID="aa9e852b69c878b1e79e5d9dc3a3cdc83f92cf8141013963bfee444cf47ef062" exitCode=0 Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.333547 5133 generic.go:334] "Generic (PLEG): container finished" podID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerID="625eee991bc80fa1055ba56cde17b404ac9090cc104cacfac4de0f0dad9fed89" exitCode=0 Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.333570 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerDied","Data":"aa9e852b69c878b1e79e5d9dc3a3cdc83f92cf8141013963bfee444cf47ef062"} Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.333601 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerDied","Data":"625eee991bc80fa1055ba56cde17b404ac9090cc104cacfac4de0f0dad9fed89"} Nov 21 14:04:29 crc kubenswrapper[5133]: I1121 14:04:29.695585 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6f8dcf7d6f-7dxkg"] Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.199650 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.344959 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"550fe8e4-32aa-40c5-b925-f6c496dfcdf2","Type":"ContainerDied","Data":"a802e6ea8163769d6f10ff6785d00458d9a768278dcf424c50bf80f589ff1b2d"} Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.344989 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.345039 5133 scope.go:117] "RemoveContainer" containerID="ea3fc9fd20a2cda56e5e511dd519affc6c9d9bf7f973aca62480718ee185f18f" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.347808 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-59475798db-b6bck" event={"ID":"b5beef58-6317-4739-9049-bb385e29db6c","Type":"ContainerStarted","Data":"4b6e120e604fc4bba3789e42c9c03aa4b1991707de3c88953884d2eb05c1dcd3"} Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.347852 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-59475798db-b6bck" event={"ID":"b5beef58-6317-4739-9049-bb385e29db6c","Type":"ContainerStarted","Data":"7d1cd23636e4324c731842ba619c6d612dc2d2f953197b3d531e19c7fe285c43"} Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.348651 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.358250 5133 generic.go:334] "Generic (PLEG): container finished" podID="aa798184-6ef5-4fe7-8dd7-6454c7058f49" containerID="b16f5d43cf3fa8a6b30e26dc3094af70cd2c5abe6f365a65f677f166eb05bc2e" exitCode=0 Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.358383 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" event={"ID":"aa798184-6ef5-4fe7-8dd7-6454c7058f49","Type":"ContainerDied","Data":"b16f5d43cf3fa8a6b30e26dc3094af70cd2c5abe6f365a65f677f166eb05bc2e"} Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.363128 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f8dcf7d6f-7dxkg" event={"ID":"ac4cef87-965c-4905-b980-0dd6591d9317","Type":"ContainerStarted","Data":"a7b44bf058961de4dea513721d91ea8592a67c709bed7bb393df8ed12f902767"} Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.363167 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f8dcf7d6f-7dxkg" event={"ID":"ac4cef87-965c-4905-b980-0dd6591d9317","Type":"ContainerStarted","Data":"b0277a0522276dd959fe69f003bcfb43bceb3460ae865cf10b90b30c002ba1a3"} Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.363183 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f8dcf7d6f-7dxkg" event={"ID":"ac4cef87-965c-4905-b980-0dd6591d9317","Type":"ContainerStarted","Data":"3cf4d4cda34384efb80d2e65cb4233295642623c762583ef2ce60961ab9af215"} Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.363325 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.373918 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-59475798db-b6bck" podStartSLOduration=4.373891961 podStartE2EDuration="4.373891961s" podCreationTimestamp="2025-11-21 14:04:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:30.368824756 +0000 UTC m=+1330.166657004" watchObservedRunningTime="2025-11-21 14:04:30.373891961 +0000 UTC m=+1330.171724229" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.375843 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5trxn\" (UniqueName: \"kubernetes.io/projected/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-kube-api-access-5trxn\") pod \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.375917 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-scripts\") pod \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.376230 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-log-httpd\") pod \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.376268 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-combined-ca-bundle\") pod \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.376302 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-config-data\") pod \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.376359 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-sg-core-conf-yaml\") pod \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.376388 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-run-httpd\") pod \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\" (UID: \"550fe8e4-32aa-40c5-b925-f6c496dfcdf2\") " Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.378141 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "550fe8e4-32aa-40c5-b925-f6c496dfcdf2" (UID: "550fe8e4-32aa-40c5-b925-f6c496dfcdf2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.383281 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "550fe8e4-32aa-40c5-b925-f6c496dfcdf2" (UID: "550fe8e4-32aa-40c5-b925-f6c496dfcdf2"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.383542 5133 scope.go:117] "RemoveContainer" containerID="dc316e1cedd0ed9d7085a3a50a7fd9974b26effca798e7c0d6309345c02ddbf7" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.388715 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-scripts" (OuterVolumeSpecName: "scripts") pod "550fe8e4-32aa-40c5-b925-f6c496dfcdf2" (UID: "550fe8e4-32aa-40c5-b925-f6c496dfcdf2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.410106 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-kube-api-access-5trxn" (OuterVolumeSpecName: "kube-api-access-5trxn") pod "550fe8e4-32aa-40c5-b925-f6c496dfcdf2" (UID: "550fe8e4-32aa-40c5-b925-f6c496dfcdf2"). InnerVolumeSpecName "kube-api-access-5trxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.412349 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6f8dcf7d6f-7dxkg" podStartSLOduration=2.412323288 podStartE2EDuration="2.412323288s" podCreationTimestamp="2025-11-21 14:04:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:30.404154119 +0000 UTC m=+1330.201986387" watchObservedRunningTime="2025-11-21 14:04:30.412323288 +0000 UTC m=+1330.210155536" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.438360 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "550fe8e4-32aa-40c5-b925-f6c496dfcdf2" (UID: "550fe8e4-32aa-40c5-b925-f6c496dfcdf2"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.481783 5133 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.481848 5133 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.481865 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5trxn\" (UniqueName: \"kubernetes.io/projected/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-kube-api-access-5trxn\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.481880 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.481892 5133 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.519024 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "550fe8e4-32aa-40c5-b925-f6c496dfcdf2" (UID: "550fe8e4-32aa-40c5-b925-f6c496dfcdf2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.566480 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-config-data" (OuterVolumeSpecName: "config-data") pod "550fe8e4-32aa-40c5-b925-f6c496dfcdf2" (UID: "550fe8e4-32aa-40c5-b925-f6c496dfcdf2"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.584363 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.584421 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/550fe8e4-32aa-40c5-b925-f6c496dfcdf2-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.621324 5133 scope.go:117] "RemoveContainer" containerID="aa9e852b69c878b1e79e5d9dc3a3cdc83f92cf8141013963bfee444cf47ef062" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.650824 5133 scope.go:117] "RemoveContainer" containerID="625eee991bc80fa1055ba56cde17b404ac9090cc104cacfac4de0f0dad9fed89" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.713588 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.720057 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.766422 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:04:30 crc kubenswrapper[5133]: E1121 14:04:30.766856 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="ceilometer-central-agent" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.766874 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="ceilometer-central-agent" Nov 21 14:04:30 crc kubenswrapper[5133]: E1121 14:04:30.766892 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="proxy-httpd" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.766899 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="proxy-httpd" Nov 21 14:04:30 crc kubenswrapper[5133]: E1121 14:04:30.766927 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="ceilometer-notification-agent" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.766935 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="ceilometer-notification-agent" Nov 21 14:04:30 crc kubenswrapper[5133]: E1121 14:04:30.766948 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="sg-core" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.766953 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="sg-core" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.767121 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="ceilometer-notification-agent" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.767146 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="sg-core" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.767156 5133 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="proxy-httpd" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.767168 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" containerName="ceilometer-central-agent" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.768878 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.772461 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.773162 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.782307 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.892490 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnxcq\" (UniqueName: \"kubernetes.io/projected/657c535b-7845-46e6-8a0e-57a1c694bae5-kube-api-access-bnxcq\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.892615 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-run-httpd\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.892646 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-scripts\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.892672 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-config-data\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.892689 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.892736 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.892781 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-log-httpd\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 
14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.994287 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.994664 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-log-httpd\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.994770 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnxcq\" (UniqueName: \"kubernetes.io/projected/657c535b-7845-46e6-8a0e-57a1c694bae5-kube-api-access-bnxcq\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.994911 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-run-httpd\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.994986 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-scripts\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.995122 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-config-data\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.995192 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.995224 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-log-httpd\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.995451 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-run-httpd\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:30 crc kubenswrapper[5133]: I1121 14:04:30.999629 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-config-data\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:31 crc kubenswrapper[5133]: I1121 14:04:31.003691 5133 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-scripts\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:31 crc kubenswrapper[5133]: I1121 14:04:31.004616 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:31 crc kubenswrapper[5133]: I1121 14:04:31.004728 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:31 crc kubenswrapper[5133]: I1121 14:04:31.014945 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnxcq\" (UniqueName: \"kubernetes.io/projected/657c535b-7845-46e6-8a0e-57a1c694bae5-kube-api-access-bnxcq\") pod \"ceilometer-0\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " pod="openstack/ceilometer-0" Nov 21 14:04:31 crc kubenswrapper[5133]: I1121 14:04:31.152579 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:04:31 crc kubenswrapper[5133]: I1121 14:04:31.375138 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" event={"ID":"aa798184-6ef5-4fe7-8dd7-6454c7058f49","Type":"ContainerStarted","Data":"b8cc64d0b1a05ae86806e632c4fcb231fe16b789bccb7c2c777659f48764bb03"} Nov 21 14:04:31 crc kubenswrapper[5133]: I1121 14:04:31.375570 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:31 crc kubenswrapper[5133]: I1121 14:04:31.412961 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" podStartSLOduration=5.412937075 podStartE2EDuration="5.412937075s" podCreationTimestamp="2025-11-21 14:04:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:31.408675692 +0000 UTC m=+1331.206507940" watchObservedRunningTime="2025-11-21 14:04:31.412937075 +0000 UTC m=+1331.210769323" Nov 21 14:04:31 crc kubenswrapper[5133]: I1121 14:04:31.651973 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:04:31 crc kubenswrapper[5133]: W1121 14:04:31.657034 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod657c535b_7845_46e6_8a0e_57a1c694bae5.slice/crio-8d5098396646a96925e2067fbbfaee0d388e4d40228da697732a02131822fdc6 WatchSource:0}: Error finding container 8d5098396646a96925e2067fbbfaee0d388e4d40228da697732a02131822fdc6: Status 404 returned error can't find the container with id 8d5098396646a96925e2067fbbfaee0d388e4d40228da697732a02131822fdc6 Nov 21 14:04:32 crc kubenswrapper[5133]: I1121 14:04:32.394299 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerStarted","Data":"8d5098396646a96925e2067fbbfaee0d388e4d40228da697732a02131822fdc6"} Nov 
21 14:04:32 crc kubenswrapper[5133]: I1121 14:04:32.472768 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="550fe8e4-32aa-40c5-b925-f6c496dfcdf2" path="/var/lib/kubelet/pods/550fe8e4-32aa-40c5-b925-f6c496dfcdf2/volumes" Nov 21 14:04:33 crc kubenswrapper[5133]: I1121 14:04:33.406570 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerStarted","Data":"58068de407f13a486f898dc36f46c1891b1001d709fc556da3fdc711a643b582"} Nov 21 14:04:34 crc kubenswrapper[5133]: I1121 14:04:34.416280 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerStarted","Data":"9ede2213806aed9f825688e4b9df1ba079e12cc270e67531b1cc8d05ee90c1e6"} Nov 21 14:04:35 crc kubenswrapper[5133]: I1121 14:04:35.432340 5133 generic.go:334] "Generic (PLEG): container finished" podID="e5102115-b63e-42e2-8aae-1a68e7dda37c" containerID="d252e144cec13d98268fe83087b99764332e2dfbdac245e47d37e15c587dc6de" exitCode=0 Nov 21 14:04:35 crc kubenswrapper[5133]: I1121 14:04:35.432523 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-m47ck" event={"ID":"e5102115-b63e-42e2-8aae-1a68e7dda37c","Type":"ContainerDied","Data":"d252e144cec13d98268fe83087b99764332e2dfbdac245e47d37e15c587dc6de"} Nov 21 14:04:35 crc kubenswrapper[5133]: I1121 14:04:35.437409 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerStarted","Data":"091d4de167f9c4af53e8fb653a12c6c5e3b54a02bc7bccead310be9d9ebdae72"} Nov 21 14:04:36 crc kubenswrapper[5133]: I1121 14:04:36.450771 5133 generic.go:334] "Generic (PLEG): container finished" podID="b61488dd-2db1-43b5-996b-43b76a5dbda6" containerID="3494c50fa90e29715228d64226f1a7d95710c4cb930d522c966e254df0e38021" exitCode=0 Nov 21 14:04:36 crc kubenswrapper[5133]: I1121 14:04:36.450920 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8cz2f" event={"ID":"b61488dd-2db1-43b5-996b-43b76a5dbda6","Type":"ContainerDied","Data":"3494c50fa90e29715228d64226f1a7d95710c4cb930d522c966e254df0e38021"} Nov 21 14:04:36 crc kubenswrapper[5133]: I1121 14:04:36.908608 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-m47ck" Nov 21 14:04:36 crc kubenswrapper[5133]: I1121 14:04:36.924152 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.022710 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bf59f66bf-7hgt9"] Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.022987 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" podUID="92dd2b99-d1ce-4bdf-a002-94828c44bb40" containerName="dnsmasq-dns" containerID="cri-o://4a5564c86af0c6c595c719fb4772e5581e5b38cf3a2321ee5521d1dbeee04cca" gracePeriod=10 Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.031401 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-combined-ca-bundle\") pod \"e5102115-b63e-42e2-8aae-1a68e7dda37c\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.031517 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7t567\" (UniqueName: \"kubernetes.io/projected/e5102115-b63e-42e2-8aae-1a68e7dda37c-kube-api-access-7t567\") pod \"e5102115-b63e-42e2-8aae-1a68e7dda37c\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.031611 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-db-sync-config-data\") pod \"e5102115-b63e-42e2-8aae-1a68e7dda37c\" (UID: \"e5102115-b63e-42e2-8aae-1a68e7dda37c\") " Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.041201 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5102115-b63e-42e2-8aae-1a68e7dda37c-kube-api-access-7t567" (OuterVolumeSpecName: "kube-api-access-7t567") pod "e5102115-b63e-42e2-8aae-1a68e7dda37c" (UID: "e5102115-b63e-42e2-8aae-1a68e7dda37c"). InnerVolumeSpecName "kube-api-access-7t567". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.052122 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e5102115-b63e-42e2-8aae-1a68e7dda37c" (UID: "e5102115-b63e-42e2-8aae-1a68e7dda37c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.076618 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e5102115-b63e-42e2-8aae-1a68e7dda37c" (UID: "e5102115-b63e-42e2-8aae-1a68e7dda37c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.134509 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.134562 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7t567\" (UniqueName: \"kubernetes.io/projected/e5102115-b63e-42e2-8aae-1a68e7dda37c-kube-api-access-7t567\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.134578 5133 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e5102115-b63e-42e2-8aae-1a68e7dda37c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.506070 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-m47ck" event={"ID":"e5102115-b63e-42e2-8aae-1a68e7dda37c","Type":"ContainerDied","Data":"1379406d2cc782d96ee5146cb934579b91a90bdb915702689845f9db9234ce8c"} Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.506118 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1379406d2cc782d96ee5146cb934579b91a90bdb915702689845f9db9234ce8c" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.506254 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-m47ck" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.522258 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerStarted","Data":"b3aca601ae498023a91992a1c5d57804628421c59fc9f7347650c01a486e6357"} Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.522966 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.526324 5133 generic.go:334] "Generic (PLEG): container finished" podID="92dd2b99-d1ce-4bdf-a002-94828c44bb40" containerID="4a5564c86af0c6c595c719fb4772e5581e5b38cf3a2321ee5521d1dbeee04cca" exitCode=0 Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.526504 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" event={"ID":"92dd2b99-d1ce-4bdf-a002-94828c44bb40","Type":"ContainerDied","Data":"4a5564c86af0c6c595c719fb4772e5581e5b38cf3a2321ee5521d1dbeee04cca"} Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.526534 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" event={"ID":"92dd2b99-d1ce-4bdf-a002-94828c44bb40","Type":"ContainerDied","Data":"7edfe4e12bdc90b8a43bf14f784c63dac70f98603967d7e3ad689dd2137fa74f"} Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.526546 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7edfe4e12bdc90b8a43bf14f784c63dac70f98603967d7e3ad689dd2137fa74f" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.546083 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.9198591670000003 podStartE2EDuration="7.546059567s" podCreationTimestamp="2025-11-21 14:04:30 +0000 UTC" firstStartedPulling="2025-11-21 14:04:31.661251538 +0000 UTC m=+1331.459083786" lastFinishedPulling="2025-11-21 
14:04:36.287451938 +0000 UTC m=+1336.085284186" observedRunningTime="2025-11-21 14:04:37.541165447 +0000 UTC m=+1337.338997705" watchObservedRunningTime="2025-11-21 14:04:37.546059567 +0000 UTC m=+1337.343891815" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.564876 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.644436 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-sb\") pod \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.644548 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzn2b\" (UniqueName: \"kubernetes.io/projected/92dd2b99-d1ce-4bdf-a002-94828c44bb40-kube-api-access-qzn2b\") pod \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.644658 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-config\") pod \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.644691 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-nb\") pod \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.644751 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-dns-svc\") pod \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\" (UID: \"92dd2b99-d1ce-4bdf-a002-94828c44bb40\") " Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.693141 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92dd2b99-d1ce-4bdf-a002-94828c44bb40-kube-api-access-qzn2b" (OuterVolumeSpecName: "kube-api-access-qzn2b") pod "92dd2b99-d1ce-4bdf-a002-94828c44bb40" (UID: "92dd2b99-d1ce-4bdf-a002-94828c44bb40"). InnerVolumeSpecName "kube-api-access-qzn2b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.747741 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzn2b\" (UniqueName: \"kubernetes.io/projected/92dd2b99-d1ce-4bdf-a002-94828c44bb40-kube-api-access-qzn2b\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.757754 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-68665bb7f8-mv95p"] Nov 21 14:04:37 crc kubenswrapper[5133]: E1121 14:04:37.758206 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92dd2b99-d1ce-4bdf-a002-94828c44bb40" containerName="init" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.758218 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="92dd2b99-d1ce-4bdf-a002-94828c44bb40" containerName="init" Nov 21 14:04:37 crc kubenswrapper[5133]: E1121 14:04:37.758240 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92dd2b99-d1ce-4bdf-a002-94828c44bb40" containerName="dnsmasq-dns" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.758246 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="92dd2b99-d1ce-4bdf-a002-94828c44bb40" containerName="dnsmasq-dns" Nov 21 14:04:37 crc kubenswrapper[5133]: E1121 14:04:37.758258 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5102115-b63e-42e2-8aae-1a68e7dda37c" containerName="barbican-db-sync" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.758264 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5102115-b63e-42e2-8aae-1a68e7dda37c" containerName="barbican-db-sync" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.758440 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5102115-b63e-42e2-8aae-1a68e7dda37c" containerName="barbican-db-sync" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.758459 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="92dd2b99-d1ce-4bdf-a002-94828c44bb40" containerName="dnsmasq-dns" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.759505 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.763388 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.763682 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.764080 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-58hz5" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.776473 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-654ff44bc9-5rc2k"] Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.780051 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "92dd2b99-d1ce-4bdf-a002-94828c44bb40" (UID: "92dd2b99-d1ce-4bdf-a002-94828c44bb40"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.782691 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.794062 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.796009 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "92dd2b99-d1ce-4bdf-a002-94828c44bb40" (UID: "92dd2b99-d1ce-4bdf-a002-94828c44bb40"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.798813 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-654ff44bc9-5rc2k"] Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.830056 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-config" (OuterVolumeSpecName: "config") pod "92dd2b99-d1ce-4bdf-a002-94828c44bb40" (UID: "92dd2b99-d1ce-4bdf-a002-94828c44bb40"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.837853 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-68665bb7f8-mv95p"] Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.847626 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "92dd2b99-d1ce-4bdf-a002-94828c44bb40" (UID: "92dd2b99-d1ce-4bdf-a002-94828c44bb40"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849369 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1701df73-8d79-4305-a204-4ab7bf029dfd-logs\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849435 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-config-data\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849463 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1701df73-8d79-4305-a204-4ab7bf029dfd-config-data-custom\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849488 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1701df73-8d79-4305-a204-4ab7bf029dfd-combined-ca-bundle\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849516 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1701df73-8d79-4305-a204-4ab7bf029dfd-config-data\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849533 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-logs\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849557 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-combined-ca-bundle\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849584 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q66vn\" (UniqueName: \"kubernetes.io/projected/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-kube-api-access-q66vn\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849607 5133 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-config-data-custom\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849626 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5mn8\" (UniqueName: \"kubernetes.io/projected/1701df73-8d79-4305-a204-4ab7bf029dfd-kube-api-access-x5mn8\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849686 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849699 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849711 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.849720 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/92dd2b99-d1ce-4bdf-a002-94828c44bb40-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.891064 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-869f779d85-9cqvh"] Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.893072 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.933597 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-9cqvh"] Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954591 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5mn8\" (UniqueName: \"kubernetes.io/projected/1701df73-8d79-4305-a204-4ab7bf029dfd-kube-api-access-x5mn8\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954666 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-sb\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954702 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-dns-svc\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954729 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-config\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954779 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1701df73-8d79-4305-a204-4ab7bf029dfd-logs\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954807 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-config-data\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954831 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1701df73-8d79-4305-a204-4ab7bf029dfd-config-data-custom\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954861 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-nb\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954881 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1701df73-8d79-4305-a204-4ab7bf029dfd-combined-ca-bundle\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954906 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kr9g5\" (UniqueName: \"kubernetes.io/projected/33026c97-aabd-489f-a019-ff1337ff971b-kube-api-access-kr9g5\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954931 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1701df73-8d79-4305-a204-4ab7bf029dfd-config-data\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954954 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-logs\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.954981 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-combined-ca-bundle\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.955035 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q66vn\" (UniqueName: \"kubernetes.io/projected/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-kube-api-access-q66vn\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.955062 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-config-data-custom\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.964717 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1701df73-8d79-4305-a204-4ab7bf029dfd-logs\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.966529 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1701df73-8d79-4305-a204-4ab7bf029dfd-config-data\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 
14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.969479 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-logs\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.972156 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1701df73-8d79-4305-a204-4ab7bf029dfd-config-data-custom\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.973922 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-585c9d5854-m4bwc"] Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.976707 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.980657 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-config-data\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.981239 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-config-data-custom\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.981553 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1701df73-8d79-4305-a204-4ab7bf029dfd-combined-ca-bundle\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.981762 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.995848 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5mn8\" (UniqueName: \"kubernetes.io/projected/1701df73-8d79-4305-a204-4ab7bf029dfd-kube-api-access-x5mn8\") pod \"barbican-worker-654ff44bc9-5rc2k\" (UID: \"1701df73-8d79-4305-a204-4ab7bf029dfd\") " pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:37 crc kubenswrapper[5133]: I1121 14:04:37.998779 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-585c9d5854-m4bwc"] Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.034247 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q66vn\" (UniqueName: \"kubernetes.io/projected/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-kube-api-access-q66vn\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 
14:04:38.034291 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f583d06c-f59e-4f1e-883b-0ad7617e3c0f-combined-ca-bundle\") pod \"barbican-keystone-listener-68665bb7f8-mv95p\" (UID: \"f583d06c-f59e-4f1e-883b-0ad7617e3c0f\") " pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.058290 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-dns-svc\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.058393 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-config\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.058505 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-nb\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.058535 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74jq8\" (UniqueName: \"kubernetes.io/projected/3b99cb93-e45a-4c5a-a449-eafa3241be56-kube-api-access-74jq8\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.058571 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kr9g5\" (UniqueName: \"kubernetes.io/projected/33026c97-aabd-489f-a019-ff1337ff971b-kube-api-access-kr9g5\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.058595 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.058650 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data-custom\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.058696 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b99cb93-e45a-4c5a-a449-eafa3241be56-logs\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 
crc kubenswrapper[5133]: I1121 14:04:38.058717 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-combined-ca-bundle\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.058769 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-sb\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.059870 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-sb\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.059968 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-dns-svc\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.060675 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-nb\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.061956 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-config\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.081340 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kr9g5\" (UniqueName: \"kubernetes.io/projected/33026c97-aabd-489f-a019-ff1337ff971b-kube-api-access-kr9g5\") pod \"dnsmasq-dns-869f779d85-9cqvh\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.098117 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.120356 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.146472 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-654ff44bc9-5rc2k" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.164690 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-db-sync-config-data\") pod \"b61488dd-2db1-43b5-996b-43b76a5dbda6\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.164842 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-config-data\") pod \"b61488dd-2db1-43b5-996b-43b76a5dbda6\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.164886 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-combined-ca-bundle\") pod \"b61488dd-2db1-43b5-996b-43b76a5dbda6\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.164943 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b61488dd-2db1-43b5-996b-43b76a5dbda6-etc-machine-id\") pod \"b61488dd-2db1-43b5-996b-43b76a5dbda6\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.165274 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b61488dd-2db1-43b5-996b-43b76a5dbda6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b61488dd-2db1-43b5-996b-43b76a5dbda6" (UID: "b61488dd-2db1-43b5-996b-43b76a5dbda6"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.166475 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdp96\" (UniqueName: \"kubernetes.io/projected/b61488dd-2db1-43b5-996b-43b76a5dbda6-kube-api-access-hdp96\") pod \"b61488dd-2db1-43b5-996b-43b76a5dbda6\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.166527 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-scripts\") pod \"b61488dd-2db1-43b5-996b-43b76a5dbda6\" (UID: \"b61488dd-2db1-43b5-996b-43b76a5dbda6\") " Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.166806 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data-custom\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.166849 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b99cb93-e45a-4c5a-a449-eafa3241be56-logs\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.166876 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-combined-ca-bundle\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.167011 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74jq8\" (UniqueName: \"kubernetes.io/projected/3b99cb93-e45a-4c5a-a449-eafa3241be56-kube-api-access-74jq8\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.167044 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.167107 5133 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b61488dd-2db1-43b5-996b-43b76a5dbda6-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.170056 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b61488dd-2db1-43b5-996b-43b76a5dbda6" (UID: "b61488dd-2db1-43b5-996b-43b76a5dbda6"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.173165 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b99cb93-e45a-4c5a-a449-eafa3241be56-logs\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.177883 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-scripts" (OuterVolumeSpecName: "scripts") pod "b61488dd-2db1-43b5-996b-43b76a5dbda6" (UID: "b61488dd-2db1-43b5-996b-43b76a5dbda6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.182693 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data-custom\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.186495 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b61488dd-2db1-43b5-996b-43b76a5dbda6-kube-api-access-hdp96" (OuterVolumeSpecName: "kube-api-access-hdp96") pod "b61488dd-2db1-43b5-996b-43b76a5dbda6" (UID: "b61488dd-2db1-43b5-996b-43b76a5dbda6"). InnerVolumeSpecName "kube-api-access-hdp96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.186909 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-combined-ca-bundle\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.188880 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.198163 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74jq8\" (UniqueName: \"kubernetes.io/projected/3b99cb93-e45a-4c5a-a449-eafa3241be56-kube-api-access-74jq8\") pod \"barbican-api-585c9d5854-m4bwc\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.206010 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b61488dd-2db1-43b5-996b-43b76a5dbda6" (UID: "b61488dd-2db1-43b5-996b-43b76a5dbda6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.224285 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.275513 5133 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.275562 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.275580 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdp96\" (UniqueName: \"kubernetes.io/projected/b61488dd-2db1-43b5-996b-43b76a5dbda6-kube-api-access-hdp96\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.275594 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.288175 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-config-data" (OuterVolumeSpecName: "config-data") pod "b61488dd-2db1-43b5-996b-43b76a5dbda6" (UID: "b61488dd-2db1-43b5-996b-43b76a5dbda6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.347605 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.377448 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b61488dd-2db1-43b5-996b-43b76a5dbda6-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.560504 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8cz2f" event={"ID":"b61488dd-2db1-43b5-996b-43b76a5dbda6","Type":"ContainerDied","Data":"c7a4e2a3250f5d9d66bbbbb2056fc3cd77c91041bad9ea407b0d0847b412edc6"} Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.560567 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c7a4e2a3250f5d9d66bbbbb2056fc3cd77c91041bad9ea407b0d0847b412edc6" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.560635 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8cz2f" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.560931 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bf59f66bf-7hgt9" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.608255 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bf59f66bf-7hgt9"] Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.619806 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bf59f66bf-7hgt9"] Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.722291 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 14:04:38 crc kubenswrapper[5133]: E1121 14:04:38.723161 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b61488dd-2db1-43b5-996b-43b76a5dbda6" containerName="cinder-db-sync" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.723184 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b61488dd-2db1-43b5-996b-43b76a5dbda6" containerName="cinder-db-sync" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.723386 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b61488dd-2db1-43b5-996b-43b76a5dbda6" containerName="cinder-db-sync" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.724925 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.736226 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.736986 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-hf74x" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.737146 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.737782 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.750829 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.812096 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-9cqvh"] Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.819986 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-68665bb7f8-mv95p"] Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.825659 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-654ff44bc9-5rc2k"] Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.874181 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-m2zdv"] Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.925367 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.937075 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.937624 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.937660 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fph4d\" (UniqueName: \"kubernetes.io/projected/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-kube-api-access-fph4d\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.937705 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.937737 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-scripts\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:38 crc kubenswrapper[5133]: I1121 14:04:38.937834 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.016092 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-m2zdv"] Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.043732 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-dns-svc\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.043791 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.043819 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-config\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.043841 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.043865 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-sb\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.043906 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-nb\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.043935 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.043953 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fph4d\" (UniqueName: \"kubernetes.io/projected/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-kube-api-access-fph4d\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.043980 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.044298 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-scripts\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.044342 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w62c\" (UniqueName: \"kubernetes.io/projected/169191a4-579a-4818-be80-bd64998e72a1-kube-api-access-8w62c\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.044444 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-etc-machine-id\") pod 
\"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.056729 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.059369 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-scripts\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.073827 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.112026 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-9cqvh"] Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.119611 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.120450 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fph4d\" (UniqueName: \"kubernetes.io/projected/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-kube-api-access-fph4d\") pod \"cinder-scheduler-0\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.149231 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w62c\" (UniqueName: \"kubernetes.io/projected/169191a4-579a-4818-be80-bd64998e72a1-kube-api-access-8w62c\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.149446 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-dns-svc\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.149554 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-config\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.149613 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-sb\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: 
\"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.149747 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-nb\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.151198 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-nb\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.151202 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.151469 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-sb\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.151796 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-config\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.152200 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-dns-svc\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.158485 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.164291 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.174381 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w62c\" (UniqueName: \"kubernetes.io/projected/169191a4-579a-4818-be80-bd64998e72a1-kube-api-access-8w62c\") pod \"dnsmasq-dns-58db5546cc-m2zdv\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.192227 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.226582 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-585c9d5854-m4bwc"] Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.252036 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data-custom\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.252158 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24l2n\" (UniqueName: \"kubernetes.io/projected/45739e81-e946-42b2-9433-aeb0f5a8dc0f-kube-api-access-24l2n\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.252211 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45739e81-e946-42b2-9433-aeb0f5a8dc0f-logs\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.252315 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.252359 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45739e81-e946-42b2-9433-aeb0f5a8dc0f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.252394 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-scripts\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.252421 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 
crc kubenswrapper[5133]: I1121 14:04:39.300276 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.354509 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24l2n\" (UniqueName: \"kubernetes.io/projected/45739e81-e946-42b2-9433-aeb0f5a8dc0f-kube-api-access-24l2n\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.354621 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45739e81-e946-42b2-9433-aeb0f5a8dc0f-logs\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.354680 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.354728 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45739e81-e946-42b2-9433-aeb0f5a8dc0f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.354769 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-scripts\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.354802 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.354828 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data-custom\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.361415 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data-custom\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.362617 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45739e81-e946-42b2-9433-aeb0f5a8dc0f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.363265 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/45739e81-e946-42b2-9433-aeb0f5a8dc0f-logs\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.364825 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.376442 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.380795 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-scripts\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.381144 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.400014 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24l2n\" (UniqueName: \"kubernetes.io/projected/45739e81-e946-42b2-9433-aeb0f5a8dc0f-kube-api-access-24l2n\") pod \"cinder-api-0\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.507886 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.573973 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-869f779d85-9cqvh" event={"ID":"33026c97-aabd-489f-a019-ff1337ff971b","Type":"ContainerStarted","Data":"c919d5031b503505a172b11eb34e32c4b0bbda6836c4a0686c5f02eac49375c6"} Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.575456 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" event={"ID":"f583d06c-f59e-4f1e-883b-0ad7617e3c0f","Type":"ContainerStarted","Data":"8c03941e3dcecd7d9545e56c1225a9a0ac02b6abb2398d65de737da304ee3afe"} Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.583122 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-585c9d5854-m4bwc" event={"ID":"3b99cb93-e45a-4c5a-a449-eafa3241be56","Type":"ContainerStarted","Data":"15ee1a3f491a1ffa03c07748b00a08afbee9c3d0f53dbd1043f0baf55782f42b"} Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.587525 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-654ff44bc9-5rc2k" event={"ID":"1701df73-8d79-4305-a204-4ab7bf029dfd","Type":"ContainerStarted","Data":"7b3ed74b2b133c88cf1be98ff4751743b3ba15fe0e2727c89457fcf36188f314"} Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.844903 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 14:04:39 crc kubenswrapper[5133]: I1121 14:04:39.950889 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-m2zdv"] Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.142165 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.470780 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92dd2b99-d1ce-4bdf-a002-94828c44bb40" path="/var/lib/kubelet/pods/92dd2b99-d1ce-4bdf-a002-94828c44bb40/volumes" Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.601737 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-585c9d5854-m4bwc" event={"ID":"3b99cb93-e45a-4c5a-a449-eafa3241be56","Type":"ContainerStarted","Data":"fc4f79f344f6ec1b3dfae79168535a9af52a0cf9572a1a4c45c62162d45e0d7b"} Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.601808 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-585c9d5854-m4bwc" event={"ID":"3b99cb93-e45a-4c5a-a449-eafa3241be56","Type":"ContainerStarted","Data":"189352f52a99ca5f194736c700010e6bc79bd231f68591b8aeee19fc2286688b"} Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.602084 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.602287 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.603943 5133 generic.go:334] "Generic (PLEG): container finished" podID="33026c97-aabd-489f-a019-ff1337ff971b" containerID="0bda2eed8c7f11e823734b0c22dc9d94b9a3a99d9c4d948e006c28f95dc9d66e" exitCode=0 Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.604098 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-869f779d85-9cqvh" 
event={"ID":"33026c97-aabd-489f-a019-ff1337ff971b","Type":"ContainerDied","Data":"0bda2eed8c7f11e823734b0c22dc9d94b9a3a99d9c4d948e006c28f95dc9d66e"} Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.608065 5133 generic.go:334] "Generic (PLEG): container finished" podID="169191a4-579a-4818-be80-bd64998e72a1" containerID="200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f" exitCode=0 Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.608175 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" event={"ID":"169191a4-579a-4818-be80-bd64998e72a1","Type":"ContainerDied","Data":"200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f"} Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.608221 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" event={"ID":"169191a4-579a-4818-be80-bd64998e72a1","Type":"ContainerStarted","Data":"af03e8d8f98f99e83743c383897c935ffc12e8534fb16042dcd560f79bcf32ab"} Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.626784 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1","Type":"ContainerStarted","Data":"6b1aa3e98a5abfeabb327fc846990830ce67e374ce055fb9f40f4defa8723fa3"} Nov 21 14:04:40 crc kubenswrapper[5133]: I1121 14:04:40.632916 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-585c9d5854-m4bwc" podStartSLOduration=3.63288427 podStartE2EDuration="3.63288427s" podCreationTimestamp="2025-11-21 14:04:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:40.623208781 +0000 UTC m=+1340.421041049" watchObservedRunningTime="2025-11-21 14:04:40.63288427 +0000 UTC m=+1340.430716518" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.496905 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.651773 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" event={"ID":"f583d06c-f59e-4f1e-883b-0ad7617e3c0f","Type":"ContainerStarted","Data":"2534097f44dcb8564c568409618839a05c9175391d67b08cda9eb6d354c17aa8"} Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.657427 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"45739e81-e946-42b2-9433-aeb0f5a8dc0f","Type":"ContainerStarted","Data":"e46390a0be42c46fc37e9ab0800c5d6193ed18609244d362711d5b9cee0e2f5d"} Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.666984 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" event={"ID":"169191a4-579a-4818-be80-bd64998e72a1","Type":"ContainerStarted","Data":"db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb"} Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.672958 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.675331 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-config\") pod \"33026c97-aabd-489f-a019-ff1337ff971b\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.675575 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-sb\") pod \"33026c97-aabd-489f-a019-ff1337ff971b\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.675973 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-dns-svc\") pod \"33026c97-aabd-489f-a019-ff1337ff971b\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.676061 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kr9g5\" (UniqueName: \"kubernetes.io/projected/33026c97-aabd-489f-a019-ff1337ff971b-kube-api-access-kr9g5\") pod \"33026c97-aabd-489f-a019-ff1337ff971b\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.676160 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-nb\") pod \"33026c97-aabd-489f-a019-ff1337ff971b\" (UID: \"33026c97-aabd-489f-a019-ff1337ff971b\") " Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.702551 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-869f779d85-9cqvh" event={"ID":"33026c97-aabd-489f-a019-ff1337ff971b","Type":"ContainerDied","Data":"c919d5031b503505a172b11eb34e32c4b0bbda6836c4a0686c5f02eac49375c6"} Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.702647 5133 scope.go:117] "RemoveContainer" containerID="0bda2eed8c7f11e823734b0c22dc9d94b9a3a99d9c4d948e006c28f95dc9d66e" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.702963 5133 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/dnsmasq-dns-869f779d85-9cqvh" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.713054 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" podStartSLOduration=3.7130231719999998 podStartE2EDuration="3.713023172s" podCreationTimestamp="2025-11-21 14:04:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:41.691946699 +0000 UTC m=+1341.489778947" watchObservedRunningTime="2025-11-21 14:04:41.713023172 +0000 UTC m=+1341.510855410" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.715779 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33026c97-aabd-489f-a019-ff1337ff971b-kube-api-access-kr9g5" (OuterVolumeSpecName: "kube-api-access-kr9g5") pod "33026c97-aabd-489f-a019-ff1337ff971b" (UID: "33026c97-aabd-489f-a019-ff1337ff971b"). InnerVolumeSpecName "kube-api-access-kr9g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.740041 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "33026c97-aabd-489f-a019-ff1337ff971b" (UID: "33026c97-aabd-489f-a019-ff1337ff971b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.743124 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-config" (OuterVolumeSpecName: "config") pod "33026c97-aabd-489f-a019-ff1337ff971b" (UID: "33026c97-aabd-489f-a019-ff1337ff971b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.762872 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "33026c97-aabd-489f-a019-ff1337ff971b" (UID: "33026c97-aabd-489f-a019-ff1337ff971b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.772574 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "33026c97-aabd-489f-a019-ff1337ff971b" (UID: "33026c97-aabd-489f-a019-ff1337ff971b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.781457 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.781503 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.781521 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.781532 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kr9g5\" (UniqueName: \"kubernetes.io/projected/33026c97-aabd-489f-a019-ff1337ff971b-kube-api-access-kr9g5\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:41 crc kubenswrapper[5133]: I1121 14:04:41.781541 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33026c97-aabd-489f-a019-ff1337ff971b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.109413 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-9cqvh"] Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.138574 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-869f779d85-9cqvh"] Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.475087 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33026c97-aabd-489f-a019-ff1337ff971b" path="/var/lib/kubelet/pods/33026c97-aabd-489f-a019-ff1337ff971b/volumes" Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.496903 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.759397 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" event={"ID":"f583d06c-f59e-4f1e-883b-0ad7617e3c0f","Type":"ContainerStarted","Data":"ebba76ec99505d5caa8a36f4d71623c21a397f811e7b714053fca4ddefa7e0ce"} Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.771797 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"45739e81-e946-42b2-9433-aeb0f5a8dc0f","Type":"ContainerStarted","Data":"edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439"} Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.774187 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1","Type":"ContainerStarted","Data":"cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c"} Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.809452 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-68665bb7f8-mv95p" podStartSLOduration=3.392830528 podStartE2EDuration="5.809435377s" podCreationTimestamp="2025-11-21 14:04:37 +0000 UTC" firstStartedPulling="2025-11-21 14:04:38.874669677 +0000 UTC m=+1338.672501925" lastFinishedPulling="2025-11-21 14:04:41.291274526 +0000 UTC m=+1341.089106774" observedRunningTime="2025-11-21 
14:04:42.80580717 +0000 UTC m=+1342.603639418" watchObservedRunningTime="2025-11-21 14:04:42.809435377 +0000 UTC m=+1342.607267625" Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.817018 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-654ff44bc9-5rc2k" event={"ID":"1701df73-8d79-4305-a204-4ab7bf029dfd","Type":"ContainerStarted","Data":"95c9a935f2438d0d6573f1d0b555891cef95368d9ddd17af516de269138df845"} Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.817077 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-654ff44bc9-5rc2k" event={"ID":"1701df73-8d79-4305-a204-4ab7bf029dfd","Type":"ContainerStarted","Data":"f45f23c5a10750c2557018b318272010fe1d5c19a2226c5f6ebf68973a686c69"} Nov 21 14:04:42 crc kubenswrapper[5133]: I1121 14:04:42.867931 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-654ff44bc9-5rc2k" podStartSLOduration=3.418273137 podStartE2EDuration="5.867902479s" podCreationTimestamp="2025-11-21 14:04:37 +0000 UTC" firstStartedPulling="2025-11-21 14:04:38.874472571 +0000 UTC m=+1338.672304819" lastFinishedPulling="2025-11-21 14:04:41.324101913 +0000 UTC m=+1341.121934161" observedRunningTime="2025-11-21 14:04:42.857357817 +0000 UTC m=+1342.655190075" watchObservedRunningTime="2025-11-21 14:04:42.867902479 +0000 UTC m=+1342.665734717" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.792199 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6f698694b-sk8kx"] Nov 21 14:04:43 crc kubenswrapper[5133]: E1121 14:04:43.795763 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33026c97-aabd-489f-a019-ff1337ff971b" containerName="init" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.795801 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="33026c97-aabd-489f-a019-ff1337ff971b" containerName="init" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.796122 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="33026c97-aabd-489f-a019-ff1337ff971b" containerName="init" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.797508 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.799956 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.800325 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.810219 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6f698694b-sk8kx"] Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.837419 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"45739e81-e946-42b2-9433-aeb0f5a8dc0f","Type":"ContainerStarted","Data":"b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe"} Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.837512 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerName="cinder-api-log" containerID="cri-o://edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439" gracePeriod=30 Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.837640 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerName="cinder-api" containerID="cri-o://b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe" gracePeriod=30 Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.837854 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.869232 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1","Type":"ContainerStarted","Data":"17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef"} Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.936194 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-internal-tls-certs\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.936275 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-config-data\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.936358 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-combined-ca-bundle\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.936462 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-public-tls-certs\") pod 
\"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.936574 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4kml\" (UniqueName: \"kubernetes.io/projected/40cd1965-9315-4ed8-8902-4d9f77e63740-kube-api-access-h4kml\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.936610 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40cd1965-9315-4ed8-8902-4d9f77e63740-logs\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:43 crc kubenswrapper[5133]: I1121 14:04:43.936651 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-config-data-custom\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.038173 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4kml\" (UniqueName: \"kubernetes.io/projected/40cd1965-9315-4ed8-8902-4d9f77e63740-kube-api-access-h4kml\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.038290 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40cd1965-9315-4ed8-8902-4d9f77e63740-logs\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.038316 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-config-data-custom\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.038352 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-internal-tls-certs\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.038379 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-config-data\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.038443 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-combined-ca-bundle\") 
pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.038553 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-public-tls-certs\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.040506 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/40cd1965-9315-4ed8-8902-4d9f77e63740-logs\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.047903 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-combined-ca-bundle\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.048621 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-internal-tls-certs\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.049988 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-public-tls-certs\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.050386 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-config-data-custom\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.051601 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40cd1965-9315-4ed8-8902-4d9f77e63740-config-data\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.064189 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4kml\" (UniqueName: \"kubernetes.io/projected/40cd1965-9315-4ed8-8902-4d9f77e63740-kube-api-access-h4kml\") pod \"barbican-api-6f698694b-sk8kx\" (UID: \"40cd1965-9315-4ed8-8902-4d9f77e63740\") " pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.118662 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.540591 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.648989 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45739e81-e946-42b2-9433-aeb0f5a8dc0f-logs\") pod \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.649214 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-scripts\") pod \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.649249 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45739e81-e946-42b2-9433-aeb0f5a8dc0f-etc-machine-id\") pod \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.649298 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-combined-ca-bundle\") pod \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.649421 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/45739e81-e946-42b2-9433-aeb0f5a8dc0f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "45739e81-e946-42b2-9433-aeb0f5a8dc0f" (UID: "45739e81-e946-42b2-9433-aeb0f5a8dc0f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.649469 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data\") pod \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.649506 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data-custom\") pod \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.649610 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24l2n\" (UniqueName: \"kubernetes.io/projected/45739e81-e946-42b2-9433-aeb0f5a8dc0f-kube-api-access-24l2n\") pod \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\" (UID: \"45739e81-e946-42b2-9433-aeb0f5a8dc0f\") " Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.650177 5133 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/45739e81-e946-42b2-9433-aeb0f5a8dc0f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.650676 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45739e81-e946-42b2-9433-aeb0f5a8dc0f-logs" (OuterVolumeSpecName: "logs") pod "45739e81-e946-42b2-9433-aeb0f5a8dc0f" (UID: "45739e81-e946-42b2-9433-aeb0f5a8dc0f"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.657578 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45739e81-e946-42b2-9433-aeb0f5a8dc0f-kube-api-access-24l2n" (OuterVolumeSpecName: "kube-api-access-24l2n") pod "45739e81-e946-42b2-9433-aeb0f5a8dc0f" (UID: "45739e81-e946-42b2-9433-aeb0f5a8dc0f"). InnerVolumeSpecName "kube-api-access-24l2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.661149 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "45739e81-e946-42b2-9433-aeb0f5a8dc0f" (UID: "45739e81-e946-42b2-9433-aeb0f5a8dc0f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.661310 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-scripts" (OuterVolumeSpecName: "scripts") pod "45739e81-e946-42b2-9433-aeb0f5a8dc0f" (UID: "45739e81-e946-42b2-9433-aeb0f5a8dc0f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.684563 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "45739e81-e946-42b2-9433-aeb0f5a8dc0f" (UID: "45739e81-e946-42b2-9433-aeb0f5a8dc0f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.690916 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6f698694b-sk8kx"] Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.715795 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data" (OuterVolumeSpecName: "config-data") pod "45739e81-e946-42b2-9433-aeb0f5a8dc0f" (UID: "45739e81-e946-42b2-9433-aeb0f5a8dc0f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.756391 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24l2n\" (UniqueName: \"kubernetes.io/projected/45739e81-e946-42b2-9433-aeb0f5a8dc0f-kube-api-access-24l2n\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.756439 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/45739e81-e946-42b2-9433-aeb0f5a8dc0f-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.756451 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.756463 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.756478 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.756488 5133 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/45739e81-e946-42b2-9433-aeb0f5a8dc0f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.881930 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6f698694b-sk8kx" event={"ID":"40cd1965-9315-4ed8-8902-4d9f77e63740","Type":"ContainerStarted","Data":"7804f1ea4c8ab99560a34a90048726c0dd2b15559f7052e3068372c53fcd8852"} Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.882659 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6f698694b-sk8kx" event={"ID":"40cd1965-9315-4ed8-8902-4d9f77e63740","Type":"ContainerStarted","Data":"381fd6f0f06d5c8f962102ca5e540ce976a1622e7748cafef17d6418c496852c"} Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.884777 5133 generic.go:334] "Generic (PLEG): container finished" podID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerID="b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe" exitCode=0 Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.884818 5133 generic.go:334] "Generic (PLEG): container finished" podID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerID="edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439" exitCode=143 Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.886163 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.886248 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"45739e81-e946-42b2-9433-aeb0f5a8dc0f","Type":"ContainerDied","Data":"b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe"} Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.886349 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"45739e81-e946-42b2-9433-aeb0f5a8dc0f","Type":"ContainerDied","Data":"edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439"} Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.886387 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"45739e81-e946-42b2-9433-aeb0f5a8dc0f","Type":"ContainerDied","Data":"e46390a0be42c46fc37e9ab0800c5d6193ed18609244d362711d5b9cee0e2f5d"} Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.886395 5133 scope.go:117] "RemoveContainer" containerID="b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.923198 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.184970169 podStartE2EDuration="6.923169308s" podCreationTimestamp="2025-11-21 14:04:38 +0000 UTC" firstStartedPulling="2025-11-21 14:04:39.862192484 +0000 UTC m=+1339.660024732" lastFinishedPulling="2025-11-21 14:04:41.600391623 +0000 UTC m=+1341.398223871" observedRunningTime="2025-11-21 14:04:44.913634923 +0000 UTC m=+1344.711467191" watchObservedRunningTime="2025-11-21 14:04:44.923169308 +0000 UTC m=+1344.721001556" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.931544 5133 scope.go:117] "RemoveContainer" containerID="edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.947422 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.958105 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.982623 5133 scope.go:117] "RemoveContainer" containerID="b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe" Nov 21 14:04:44 crc kubenswrapper[5133]: E1121 14:04:44.983814 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe\": container with ID starting with b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe not found: ID does not exist" containerID="b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.983853 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe"} err="failed to get container status \"b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe\": rpc error: code = NotFound desc = could not find container \"b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe\": container with ID starting with b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe not found: ID does not exist" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.983884 5133 scope.go:117] "RemoveContainer" 
containerID="edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439" Nov 21 14:04:44 crc kubenswrapper[5133]: E1121 14:04:44.986644 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439\": container with ID starting with edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439 not found: ID does not exist" containerID="edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.986685 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439"} err="failed to get container status \"edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439\": rpc error: code = NotFound desc = could not find container \"edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439\": container with ID starting with edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439 not found: ID does not exist" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.986714 5133 scope.go:117] "RemoveContainer" containerID="b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.986791 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 21 14:04:44 crc kubenswrapper[5133]: E1121 14:04:44.988851 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerName="cinder-api" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.988875 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerName="cinder-api" Nov 21 14:04:44 crc kubenswrapper[5133]: E1121 14:04:44.988902 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerName="cinder-api-log" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.988910 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerName="cinder-api-log" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.989167 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerName="cinder-api" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.989187 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" containerName="cinder-api-log" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.991029 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe"} err="failed to get container status \"b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe\": rpc error: code = NotFound desc = could not find container \"b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe\": container with ID starting with b1ea806462cb25fa9b6de93579d513e6f50ca1380c2d9f047abf0feb873e5afe not found: ID does not exist" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.991066 5133 scope.go:117] "RemoveContainer" containerID="edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.991301 5133 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439"} err="failed to get container status \"edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439\": rpc error: code = NotFound desc = could not find container \"edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439\": container with ID starting with edaa351693cd10b2b7431fa1aba1cda1e75d9dd9d45e1390487dc85390040439 not found: ID does not exist" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.992135 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.997209 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.997524 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 21 14:04:44 crc kubenswrapper[5133]: I1121 14:04:44.997612 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.004503 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.163452 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-scripts\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.163518 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-config-data-custom\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.163793 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/afc9fa06-90c1-49de-b5aa-9b10586a82b5-etc-machine-id\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.163873 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-config-data\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.163979 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afc9fa06-90c1-49de-b5aa-9b10586a82b5-logs\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.164264 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 
14:04:45.164433 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.164541 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-public-tls-certs\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.164607 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxsz4\" (UniqueName: \"kubernetes.io/projected/afc9fa06-90c1-49de-b5aa-9b10586a82b5-kube-api-access-pxsz4\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.267398 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-scripts\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.267472 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-config-data-custom\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.267532 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/afc9fa06-90c1-49de-b5aa-9b10586a82b5-etc-machine-id\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.267556 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-config-data\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.267586 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afc9fa06-90c1-49de-b5aa-9b10586a82b5-logs\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.267646 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.267672 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " 
pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.267704 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-public-tls-certs\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.267736 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxsz4\" (UniqueName: \"kubernetes.io/projected/afc9fa06-90c1-49de-b5aa-9b10586a82b5-kube-api-access-pxsz4\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.269333 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/afc9fa06-90c1-49de-b5aa-9b10586a82b5-logs\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.269753 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/afc9fa06-90c1-49de-b5aa-9b10586a82b5-etc-machine-id\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.286621 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.287226 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-config-data\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.288340 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.288881 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-config-data-custom\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.289152 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-scripts\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.289280 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/afc9fa06-90c1-49de-b5aa-9b10586a82b5-public-tls-certs\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc 
kubenswrapper[5133]: I1121 14:04:45.292684 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxsz4\" (UniqueName: \"kubernetes.io/projected/afc9fa06-90c1-49de-b5aa-9b10586a82b5-kube-api-access-pxsz4\") pod \"cinder-api-0\" (UID: \"afc9fa06-90c1-49de-b5aa-9b10586a82b5\") " pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.332139 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.851664 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 21 14:04:45 crc kubenswrapper[5133]: W1121 14:04:45.852217 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podafc9fa06_90c1_49de_b5aa_9b10586a82b5.slice/crio-c2a4343e2472da1f1cc51c9183ae823ab31bf7ac44ffac7fe2c182f41026fa7a WatchSource:0}: Error finding container c2a4343e2472da1f1cc51c9183ae823ab31bf7ac44ffac7fe2c182f41026fa7a: Status 404 returned error can't find the container with id c2a4343e2472da1f1cc51c9183ae823ab31bf7ac44ffac7fe2c182f41026fa7a Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.898172 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"afc9fa06-90c1-49de-b5aa-9b10586a82b5","Type":"ContainerStarted","Data":"c2a4343e2472da1f1cc51c9183ae823ab31bf7ac44ffac7fe2c182f41026fa7a"} Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.902795 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6f698694b-sk8kx" event={"ID":"40cd1965-9315-4ed8-8902-4d9f77e63740","Type":"ContainerStarted","Data":"0e235f00b639a1f8f32692ccaf236d700d388a56cb9f1c3a364b113ed6184b2c"} Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.903838 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.903868 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:45 crc kubenswrapper[5133]: I1121 14:04:45.938821 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6f698694b-sk8kx" podStartSLOduration=2.938800447 podStartE2EDuration="2.938800447s" podCreationTimestamp="2025-11-21 14:04:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:45.931355138 +0000 UTC m=+1345.729187386" watchObservedRunningTime="2025-11-21 14:04:45.938800447 +0000 UTC m=+1345.736632695" Nov 21 14:04:46 crc kubenswrapper[5133]: I1121 14:04:46.470543 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45739e81-e946-42b2-9433-aeb0f5a8dc0f" path="/var/lib/kubelet/pods/45739e81-e946-42b2-9433-aeb0f5a8dc0f/volumes" Nov 21 14:04:46 crc kubenswrapper[5133]: I1121 14:04:46.916860 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"afc9fa06-90c1-49de-b5aa-9b10586a82b5","Type":"ContainerStarted","Data":"f9350a6df0735fbf0ea6169889eec6270e66948dbadee7e17fd93402a9d3f728"} Nov 21 14:04:47 crc kubenswrapper[5133]: I1121 14:04:47.934579 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"afc9fa06-90c1-49de-b5aa-9b10586a82b5","Type":"ContainerStarted","Data":"ce6074dd4f6aab4a1fa70a610383498a6d681fb9d103a5ae0549cbf2ae35ef58"} Nov 21 14:04:47 crc kubenswrapper[5133]: I1121 14:04:47.982430 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.982399953 podStartE2EDuration="3.982399953s" podCreationTimestamp="2025-11-21 14:04:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:47.968981715 +0000 UTC m=+1347.766813993" watchObservedRunningTime="2025-11-21 14:04:47.982399953 +0000 UTC m=+1347.780232201" Nov 21 14:04:48 crc kubenswrapper[5133]: I1121 14:04:48.952674 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 21 14:04:49 crc kubenswrapper[5133]: I1121 14:04:49.303439 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:04:49 crc kubenswrapper[5133]: I1121 14:04:49.365534 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 21 14:04:49 crc kubenswrapper[5133]: I1121 14:04:49.397865 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-q57jg"] Nov 21 14:04:49 crc kubenswrapper[5133]: I1121 14:04:49.399596 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" podUID="aa798184-6ef5-4fe7-8dd7-6454c7058f49" containerName="dnsmasq-dns" containerID="cri-o://b8cc64d0b1a05ae86806e632c4fcb231fe16b789bccb7c2c777659f48764bb03" gracePeriod=10 Nov 21 14:04:49 crc kubenswrapper[5133]: I1121 14:04:49.826397 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 21 14:04:49 crc kubenswrapper[5133]: I1121 14:04:49.971956 5133 generic.go:334] "Generic (PLEG): container finished" podID="aa798184-6ef5-4fe7-8dd7-6454c7058f49" containerID="b8cc64d0b1a05ae86806e632c4fcb231fe16b789bccb7c2c777659f48764bb03" exitCode=0 Nov 21 14:04:49 crc kubenswrapper[5133]: I1121 14:04:49.972358 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" event={"ID":"aa798184-6ef5-4fe7-8dd7-6454c7058f49","Type":"ContainerDied","Data":"b8cc64d0b1a05ae86806e632c4fcb231fe16b789bccb7c2c777659f48764bb03"} Nov 21 14:04:49 crc kubenswrapper[5133]: I1121 14:04:49.994627 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.061919 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.063161 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.081242 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.085013 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.182820 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-nb\") pod \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.182868 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-55d9467df6-xwjg4" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.182932 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-sb\") pod \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.183012 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-config\") pod \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.183046 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bgkt\" (UniqueName: \"kubernetes.io/projected/aa798184-6ef5-4fe7-8dd7-6454c7058f49-kube-api-access-7bgkt\") pod \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.183086 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-dns-svc\") pod \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\" (UID: \"aa798184-6ef5-4fe7-8dd7-6454c7058f49\") " Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.213205 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa798184-6ef5-4fe7-8dd7-6454c7058f49-kube-api-access-7bgkt" (OuterVolumeSpecName: "kube-api-access-7bgkt") pod "aa798184-6ef5-4fe7-8dd7-6454c7058f49" (UID: "aa798184-6ef5-4fe7-8dd7-6454c7058f49"). InnerVolumeSpecName "kube-api-access-7bgkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.287141 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bgkt\" (UniqueName: \"kubernetes.io/projected/aa798184-6ef5-4fe7-8dd7-6454c7058f49-kube-api-access-7bgkt\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.292958 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aa798184-6ef5-4fe7-8dd7-6454c7058f49" (UID: "aa798184-6ef5-4fe7-8dd7-6454c7058f49"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.313731 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aa798184-6ef5-4fe7-8dd7-6454c7058f49" (UID: "aa798184-6ef5-4fe7-8dd7-6454c7058f49"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.314327 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aa798184-6ef5-4fe7-8dd7-6454c7058f49" (UID: "aa798184-6ef5-4fe7-8dd7-6454c7058f49"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.341721 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-config" (OuterVolumeSpecName: "config") pod "aa798184-6ef5-4fe7-8dd7-6454c7058f49" (UID: "aa798184-6ef5-4fe7-8dd7-6454c7058f49"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.398092 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.398137 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.398153 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.398166 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aa798184-6ef5-4fe7-8dd7-6454c7058f49-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.856622 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-f84bff9cd-skfwq" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.920760 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.988264 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.988695 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f66db59b9-q57jg" event={"ID":"aa798184-6ef5-4fe7-8dd7-6454c7058f49","Type":"ContainerDied","Data":"e8c507abbf7d084a45d8e10d6b3deeab13496d6dacc1664c5854acf7480561db"} Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.988731 5133 scope.go:117] "RemoveContainer" containerID="b8cc64d0b1a05ae86806e632c4fcb231fe16b789bccb7c2c777659f48764bb03" Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.988964 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerName="cinder-scheduler" containerID="cri-o://cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c" gracePeriod=30 Nov 21 14:04:50 crc kubenswrapper[5133]: I1121 14:04:50.989682 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerName="probe" containerID="cri-o://17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef" gracePeriod=30 Nov 21 14:04:51 crc kubenswrapper[5133]: I1121 14:04:51.033632 5133 scope.go:117] "RemoveContainer" containerID="b16f5d43cf3fa8a6b30e26dc3094af70cd2c5abe6f365a65f677f166eb05bc2e" Nov 21 14:04:51 crc kubenswrapper[5133]: I1121 14:04:51.057073 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-q57jg"] Nov 21 14:04:51 crc kubenswrapper[5133]: I1121 14:04:51.093668 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f66db59b9-q57jg"] Nov 21 14:04:52 crc kubenswrapper[5133]: I1121 14:04:52.009211 5133 generic.go:334] "Generic (PLEG): container finished" podID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerID="17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef" exitCode=0 Nov 21 14:04:52 crc kubenswrapper[5133]: I1121 14:04:52.009263 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1","Type":"ContainerDied","Data":"17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef"} Nov 21 14:04:52 crc kubenswrapper[5133]: I1121 14:04:52.469625 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa798184-6ef5-4fe7-8dd7-6454c7058f49" path="/var/lib/kubelet/pods/aa798184-6ef5-4fe7-8dd7-6454c7058f49/volumes" Nov 21 14:04:52 crc kubenswrapper[5133]: I1121 14:04:52.632892 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6f698694b-sk8kx" Nov 21 14:04:52 crc kubenswrapper[5133]: I1121 14:04:52.715621 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-585c9d5854-m4bwc"] Nov 21 14:04:52 crc kubenswrapper[5133]: I1121 14:04:52.716140 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-585c9d5854-m4bwc" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api-log" containerID="cri-o://189352f52a99ca5f194736c700010e6bc79bd231f68591b8aeee19fc2286688b" gracePeriod=30 Nov 21 14:04:52 crc kubenswrapper[5133]: I1121 14:04:52.716315 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-585c9d5854-m4bwc" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api" 
containerID="cri-o://fc4f79f344f6ec1b3dfae79168535a9af52a0cf9572a1a4c45c62162d45e0d7b" gracePeriod=30 Nov 21 14:04:53 crc kubenswrapper[5133]: I1121 14:04:53.065284 5133 generic.go:334] "Generic (PLEG): container finished" podID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerID="189352f52a99ca5f194736c700010e6bc79bd231f68591b8aeee19fc2286688b" exitCode=143 Nov 21 14:04:53 crc kubenswrapper[5133]: I1121 14:04:53.065353 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-585c9d5854-m4bwc" event={"ID":"3b99cb93-e45a-4c5a-a449-eafa3241be56","Type":"ContainerDied","Data":"189352f52a99ca5f194736c700010e6bc79bd231f68591b8aeee19fc2286688b"} Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.628610 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.698756 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-combined-ca-bundle\") pod \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.698848 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fph4d\" (UniqueName: \"kubernetes.io/projected/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-kube-api-access-fph4d\") pod \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.698895 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-scripts\") pod \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.698990 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-etc-machine-id\") pod \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.699169 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data-custom\") pod \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.699232 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data\") pod \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\" (UID: \"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1\") " Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.702126 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" (UID: "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.706491 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 21 14:04:54 crc kubenswrapper[5133]: E1121 14:04:54.707115 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa798184-6ef5-4fe7-8dd7-6454c7058f49" containerName="dnsmasq-dns" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.707147 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa798184-6ef5-4fe7-8dd7-6454c7058f49" containerName="dnsmasq-dns" Nov 21 14:04:54 crc kubenswrapper[5133]: E1121 14:04:54.707188 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa798184-6ef5-4fe7-8dd7-6454c7058f49" containerName="init" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.707201 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa798184-6ef5-4fe7-8dd7-6454c7058f49" containerName="init" Nov 21 14:04:54 crc kubenswrapper[5133]: E1121 14:04:54.707222 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerName="probe" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.707230 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerName="probe" Nov 21 14:04:54 crc kubenswrapper[5133]: E1121 14:04:54.707262 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerName="cinder-scheduler" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.707274 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerName="cinder-scheduler" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.707547 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerName="cinder-scheduler" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.707567 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa798184-6ef5-4fe7-8dd7-6454c7058f49" containerName="dnsmasq-dns" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.707581 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerName="probe" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.708278 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.719563 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.719859 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-k8rnx" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.720405 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.721154 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" (UID: "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.721296 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-kube-api-access-fph4d" (OuterVolumeSpecName: "kube-api-access-fph4d") pod "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" (UID: "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1"). InnerVolumeSpecName "kube-api-access-fph4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.725462 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.728113 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-scripts" (OuterVolumeSpecName: "scripts") pod "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" (UID: "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.803452 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-openstack-config\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.803542 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.803585 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgrfl\" (UniqueName: \"kubernetes.io/projected/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-kube-api-access-kgrfl\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.804354 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-openstack-config-secret\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.804430 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fph4d\" (UniqueName: \"kubernetes.io/projected/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-kube-api-access-fph4d\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.804441 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.804451 5133 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.804460 5133 reconciler_common.go:293] "Volume 
detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.819518 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" (UID: "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.860085 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data" (OuterVolumeSpecName: "config-data") pod "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" (UID: "1a96d392-2cf2-44c6-94d0-5df6fc14c4d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.905777 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-openstack-config-secret\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.905851 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-openstack-config\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.905871 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.905900 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgrfl\" (UniqueName: \"kubernetes.io/projected/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-kube-api-access-kgrfl\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.905970 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.905983 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.906877 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-openstack-config\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.910377 5133 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-openstack-config-secret\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.911182 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-combined-ca-bundle\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:54 crc kubenswrapper[5133]: I1121 14:04:54.925018 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgrfl\" (UniqueName: \"kubernetes.io/projected/d2c71aef-bcfe-42ba-8fcc-4fd36400f190-kube-api-access-kgrfl\") pod \"openstackclient\" (UID: \"d2c71aef-bcfe-42ba-8fcc-4fd36400f190\") " pod="openstack/openstackclient" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.090429 5133 generic.go:334] "Generic (PLEG): container finished" podID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" containerID="cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c" exitCode=0 Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.090492 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1","Type":"ContainerDied","Data":"cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c"} Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.090538 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a96d392-2cf2-44c6-94d0-5df6fc14c4d1","Type":"ContainerDied","Data":"6b1aa3e98a5abfeabb327fc846990830ce67e374ce055fb9f40f4defa8723fa3"} Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.090561 5133 scope.go:117] "RemoveContainer" containerID="17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.090613 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.117380 5133 scope.go:117] "RemoveContainer" containerID="cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.129482 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.142862 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.148137 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.166740 5133 scope.go:117] "RemoveContainer" containerID="17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef" Nov 21 14:04:55 crc kubenswrapper[5133]: E1121 14:04:55.167331 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef\": container with ID starting with 17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef not found: ID does not exist" containerID="17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.167370 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef"} err="failed to get container status \"17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef\": rpc error: code = NotFound desc = could not find container \"17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef\": container with ID starting with 17ace0b59af117302ec2a72213954bdcf8474ff0be18df7894b3886c583675ef not found: ID does not exist" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.167404 5133 scope.go:117] "RemoveContainer" containerID="cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c" Nov 21 14:04:55 crc kubenswrapper[5133]: E1121 14:04:55.167963 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c\": container with ID starting with cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c not found: ID does not exist" containerID="cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.167994 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c"} err="failed to get container status \"cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c\": rpc error: code = NotFound desc = could not find container \"cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c\": container with ID starting with cfe08cd6dc443799fe9db2f54ab378374873da8c059ebd32e58e0b4023c9ab1c not found: ID does not exist" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.171367 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.173173 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.176510 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.194485 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.319562 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.319629 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-config-data\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.319683 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/46d511f0-7077-446e-b1f6-941fd109c41c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.319702 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.319751 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-scripts\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.319794 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqbll\" (UniqueName: \"kubernetes.io/projected/46d511f0-7077-446e-b1f6-941fd109c41c-kube-api-access-dqbll\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.429199 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.429332 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-config-data\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.429494 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/46d511f0-7077-446e-b1f6-941fd109c41c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.429583 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.430171 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-scripts\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.430450 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqbll\" (UniqueName: \"kubernetes.io/projected/46d511f0-7077-446e-b1f6-941fd109c41c-kube-api-access-dqbll\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.429655 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/46d511f0-7077-446e-b1f6-941fd109c41c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.450672 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.453746 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-config-data\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.453838 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-scripts\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.454096 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46d511f0-7077-446e-b1f6-941fd109c41c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.454626 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqbll\" (UniqueName: \"kubernetes.io/projected/46d511f0-7077-446e-b1f6-941fd109c41c-kube-api-access-dqbll\") pod \"cinder-scheduler-0\" (UID: \"46d511f0-7077-446e-b1f6-941fd109c41c\") " pod="openstack/cinder-scheduler-0" Nov 21 
14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.495214 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.714924 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.896809 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-585c9d5854-m4bwc" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.146:9311/healthcheck\": read tcp 10.217.0.2:48700->10.217.0.146:9311: read: connection reset by peer" Nov 21 14:04:55 crc kubenswrapper[5133]: I1121 14:04:55.897181 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-585c9d5854-m4bwc" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.146:9311/healthcheck\": read tcp 10.217.0.2:48708->10.217.0.146:9311: read: connection reset by peer" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.103777 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"d2c71aef-bcfe-42ba-8fcc-4fd36400f190","Type":"ContainerStarted","Data":"d6e40b063806c7e3396e5341681b1cb4683023c27dcca215f61d09aae85a78d1"} Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.124653 5133 generic.go:334] "Generic (PLEG): container finished" podID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerID="fc4f79f344f6ec1b3dfae79168535a9af52a0cf9572a1a4c45c62162d45e0d7b" exitCode=0 Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.124725 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-585c9d5854-m4bwc" event={"ID":"3b99cb93-e45a-4c5a-a449-eafa3241be56","Type":"ContainerDied","Data":"fc4f79f344f6ec1b3dfae79168535a9af52a0cf9572a1a4c45c62162d45e0d7b"} Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.156551 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.454640 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.486870 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a96d392-2cf2-44c6-94d0-5df6fc14c4d1" path="/var/lib/kubelet/pods/1a96d392-2cf2-44c6-94d0-5df6fc14c4d1/volumes" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.571742 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-combined-ca-bundle\") pod \"3b99cb93-e45a-4c5a-a449-eafa3241be56\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.571800 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data-custom\") pod \"3b99cb93-e45a-4c5a-a449-eafa3241be56\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.571894 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74jq8\" (UniqueName: \"kubernetes.io/projected/3b99cb93-e45a-4c5a-a449-eafa3241be56-kube-api-access-74jq8\") pod \"3b99cb93-e45a-4c5a-a449-eafa3241be56\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.571952 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b99cb93-e45a-4c5a-a449-eafa3241be56-logs\") pod \"3b99cb93-e45a-4c5a-a449-eafa3241be56\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.572076 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data\") pod \"3b99cb93-e45a-4c5a-a449-eafa3241be56\" (UID: \"3b99cb93-e45a-4c5a-a449-eafa3241be56\") " Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.572722 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b99cb93-e45a-4c5a-a449-eafa3241be56-logs" (OuterVolumeSpecName: "logs") pod "3b99cb93-e45a-4c5a-a449-eafa3241be56" (UID: "3b99cb93-e45a-4c5a-a449-eafa3241be56"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.583934 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3b99cb93-e45a-4c5a-a449-eafa3241be56" (UID: "3b99cb93-e45a-4c5a-a449-eafa3241be56"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.618213 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b99cb93-e45a-4c5a-a449-eafa3241be56-kube-api-access-74jq8" (OuterVolumeSpecName: "kube-api-access-74jq8") pod "3b99cb93-e45a-4c5a-a449-eafa3241be56" (UID: "3b99cb93-e45a-4c5a-a449-eafa3241be56"). InnerVolumeSpecName "kube-api-access-74jq8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.629755 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3b99cb93-e45a-4c5a-a449-eafa3241be56" (UID: "3b99cb93-e45a-4c5a-a449-eafa3241be56"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.648862 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data" (OuterVolumeSpecName: "config-data") pod "3b99cb93-e45a-4c5a-a449-eafa3241be56" (UID: "3b99cb93-e45a-4c5a-a449-eafa3241be56"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.675043 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74jq8\" (UniqueName: \"kubernetes.io/projected/3b99cb93-e45a-4c5a-a449-eafa3241be56-kube-api-access-74jq8\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.675081 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b99cb93-e45a-4c5a-a449-eafa3241be56-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.675113 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.675125 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:56 crc kubenswrapper[5133]: I1121 14:04:56.675136 5133 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b99cb93-e45a-4c5a-a449-eafa3241be56-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.017109 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-59475798db-b6bck" Nov 21 14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.152641 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"46d511f0-7077-446e-b1f6-941fd109c41c","Type":"ContainerStarted","Data":"f28f2cf7390e7e5058321a07f836ff3bd7afcadbf681b68c7a16765099166242"} Nov 21 14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.152701 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"46d511f0-7077-446e-b1f6-941fd109c41c","Type":"ContainerStarted","Data":"89b18aeaa82c0d3d9b8b8195a0416df1fe05ce2694d6924582623ee0444aaf06"} Nov 21 14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.156096 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-585c9d5854-m4bwc" event={"ID":"3b99cb93-e45a-4c5a-a449-eafa3241be56","Type":"ContainerDied","Data":"15ee1a3f491a1ffa03c07748b00a08afbee9c3d0f53dbd1043f0baf55782f42b"} Nov 21 14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.156137 5133 scope.go:117] "RemoveContainer" containerID="fc4f79f344f6ec1b3dfae79168535a9af52a0cf9572a1a4c45c62162d45e0d7b" Nov 21 
14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.156324 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-585c9d5854-m4bwc" Nov 21 14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.199106 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-585c9d5854-m4bwc"] Nov 21 14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.215126 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-585c9d5854-m4bwc"] Nov 21 14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.220705 5133 scope.go:117] "RemoveContainer" containerID="189352f52a99ca5f194736c700010e6bc79bd231f68591b8aeee19fc2286688b" Nov 21 14:04:57 crc kubenswrapper[5133]: I1121 14:04:57.556687 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 21 14:04:58 crc kubenswrapper[5133]: I1121 14:04:58.176460 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"46d511f0-7077-446e-b1f6-941fd109c41c","Type":"ContainerStarted","Data":"c56ff31021a4a3d16b1a8daf21378faf341f70fbbc64b02582454a1411da8a00"} Nov 21 14:04:58 crc kubenswrapper[5133]: I1121 14:04:58.209769 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.209742711 podStartE2EDuration="3.209742711s" podCreationTimestamp="2025-11-21 14:04:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:04:58.205789996 +0000 UTC m=+1358.003622244" watchObservedRunningTime="2025-11-21 14:04:58.209742711 +0000 UTC m=+1358.007574959" Nov 21 14:04:58 crc kubenswrapper[5133]: I1121 14:04:58.471653 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" path="/var/lib/kubelet/pods/3b99cb93-e45a-4c5a-a449-eafa3241be56/volumes" Nov 21 14:04:59 crc kubenswrapper[5133]: I1121 14:04:59.343476 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6f8dcf7d6f-7dxkg" Nov 21 14:04:59 crc kubenswrapper[5133]: I1121 14:04:59.422830 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-59475798db-b6bck"] Nov 21 14:04:59 crc kubenswrapper[5133]: I1121 14:04:59.426547 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-59475798db-b6bck" podUID="b5beef58-6317-4739-9049-bb385e29db6c" containerName="neutron-api" containerID="cri-o://7d1cd23636e4324c731842ba619c6d612dc2d2f953197b3d531e19c7fe285c43" gracePeriod=30 Nov 21 14:04:59 crc kubenswrapper[5133]: I1121 14:04:59.427106 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-59475798db-b6bck" podUID="b5beef58-6317-4739-9049-bb385e29db6c" containerName="neutron-httpd" containerID="cri-o://4b6e120e604fc4bba3789e42c9c03aa4b1991707de3c88953884d2eb05c1dcd3" gracePeriod=30 Nov 21 14:05:00 crc kubenswrapper[5133]: I1121 14:05:00.219285 5133 generic.go:334] "Generic (PLEG): container finished" podID="b5beef58-6317-4739-9049-bb385e29db6c" containerID="4b6e120e604fc4bba3789e42c9c03aa4b1991707de3c88953884d2eb05c1dcd3" exitCode=0 Nov 21 14:05:00 crc kubenswrapper[5133]: I1121 14:05:00.219341 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-59475798db-b6bck" 
event={"ID":"b5beef58-6317-4739-9049-bb385e29db6c","Type":"ContainerDied","Data":"4b6e120e604fc4bba3789e42c9c03aa4b1991707de3c88953884d2eb05c1dcd3"} Nov 21 14:05:00 crc kubenswrapper[5133]: I1121 14:05:00.496037 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 21 14:05:01 crc kubenswrapper[5133]: I1121 14:05:01.161462 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 21 14:05:04 crc kubenswrapper[5133]: I1121 14:05:04.994324 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:04 crc kubenswrapper[5133]: I1121 14:05:04.995703 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="ceilometer-central-agent" containerID="cri-o://58068de407f13a486f898dc36f46c1891b1001d709fc556da3fdc711a643b582" gracePeriod=30 Nov 21 14:05:04 crc kubenswrapper[5133]: I1121 14:05:04.996301 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="sg-core" containerID="cri-o://091d4de167f9c4af53e8fb653a12c6c5e3b54a02bc7bccead310be9d9ebdae72" gracePeriod=30 Nov 21 14:05:04 crc kubenswrapper[5133]: I1121 14:05:04.996318 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="proxy-httpd" containerID="cri-o://b3aca601ae498023a91992a1c5d57804628421c59fc9f7347650c01a486e6357" gracePeriod=30 Nov 21 14:05:04 crc kubenswrapper[5133]: I1121 14:05:04.996346 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="ceilometer-notification-agent" containerID="cri-o://9ede2213806aed9f825688e4b9df1ba079e12cc270e67531b1cc8d05ee90c1e6" gracePeriod=30 Nov 21 14:05:05 crc kubenswrapper[5133]: I1121 14:05:05.294056 5133 generic.go:334] "Generic (PLEG): container finished" podID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerID="b3aca601ae498023a91992a1c5d57804628421c59fc9f7347650c01a486e6357" exitCode=0 Nov 21 14:05:05 crc kubenswrapper[5133]: I1121 14:05:05.294415 5133 generic.go:334] "Generic (PLEG): container finished" podID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerID="091d4de167f9c4af53e8fb653a12c6c5e3b54a02bc7bccead310be9d9ebdae72" exitCode=2 Nov 21 14:05:05 crc kubenswrapper[5133]: I1121 14:05:05.294170 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerDied","Data":"b3aca601ae498023a91992a1c5d57804628421c59fc9f7347650c01a486e6357"} Nov 21 14:05:05 crc kubenswrapper[5133]: I1121 14:05:05.294620 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerDied","Data":"091d4de167f9c4af53e8fb653a12c6c5e3b54a02bc7bccead310be9d9ebdae72"} Nov 21 14:05:05 crc kubenswrapper[5133]: I1121 14:05:05.302240 5133 generic.go:334] "Generic (PLEG): container finished" podID="b5beef58-6317-4739-9049-bb385e29db6c" containerID="7d1cd23636e4324c731842ba619c6d612dc2d2f953197b3d531e19c7fe285c43" exitCode=0 Nov 21 14:05:05 crc kubenswrapper[5133]: I1121 14:05:05.302289 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-59475798db-b6bck" 
event={"ID":"b5beef58-6317-4739-9049-bb385e29db6c","Type":"ContainerDied","Data":"7d1cd23636e4324c731842ba619c6d612dc2d2f953197b3d531e19c7fe285c43"} Nov 21 14:05:05 crc kubenswrapper[5133]: I1121 14:05:05.846411 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 21 14:05:06 crc kubenswrapper[5133]: I1121 14:05:06.324798 5133 generic.go:334] "Generic (PLEG): container finished" podID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerID="58068de407f13a486f898dc36f46c1891b1001d709fc556da3fdc711a643b582" exitCode=0 Nov 21 14:05:06 crc kubenswrapper[5133]: I1121 14:05:06.324859 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerDied","Data":"58068de407f13a486f898dc36f46c1891b1001d709fc556da3fdc711a643b582"} Nov 21 14:05:07 crc kubenswrapper[5133]: I1121 14:05:07.348737 5133 generic.go:334] "Generic (PLEG): container finished" podID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerID="9ede2213806aed9f825688e4b9df1ba079e12cc270e67531b1cc8d05ee90c1e6" exitCode=0 Nov 21 14:05:07 crc kubenswrapper[5133]: I1121 14:05:07.348830 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerDied","Data":"9ede2213806aed9f825688e4b9df1ba079e12cc270e67531b1cc8d05ee90c1e6"} Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.186465 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.225692 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-run-httpd\") pod \"657c535b-7845-46e6-8a0e-57a1c694bae5\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.225772 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-scripts\") pod \"657c535b-7845-46e6-8a0e-57a1c694bae5\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.225798 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-log-httpd\") pod \"657c535b-7845-46e6-8a0e-57a1c694bae5\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.225822 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnxcq\" (UniqueName: \"kubernetes.io/projected/657c535b-7845-46e6-8a0e-57a1c694bae5-kube-api-access-bnxcq\") pod \"657c535b-7845-46e6-8a0e-57a1c694bae5\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.225921 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-combined-ca-bundle\") pod \"657c535b-7845-46e6-8a0e-57a1c694bae5\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.226031 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-sg-core-conf-yaml\") pod \"657c535b-7845-46e6-8a0e-57a1c694bae5\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.226070 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-config-data\") pod \"657c535b-7845-46e6-8a0e-57a1c694bae5\" (UID: \"657c535b-7845-46e6-8a0e-57a1c694bae5\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.228720 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "657c535b-7845-46e6-8a0e-57a1c694bae5" (UID: "657c535b-7845-46e6-8a0e-57a1c694bae5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.228914 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "657c535b-7845-46e6-8a0e-57a1c694bae5" (UID: "657c535b-7845-46e6-8a0e-57a1c694bae5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.236808 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/657c535b-7845-46e6-8a0e-57a1c694bae5-kube-api-access-bnxcq" (OuterVolumeSpecName: "kube-api-access-bnxcq") pod "657c535b-7845-46e6-8a0e-57a1c694bae5" (UID: "657c535b-7845-46e6-8a0e-57a1c694bae5"). InnerVolumeSpecName "kube-api-access-bnxcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.252629 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-scripts" (OuterVolumeSpecName: "scripts") pod "657c535b-7845-46e6-8a0e-57a1c694bae5" (UID: "657c535b-7845-46e6-8a0e-57a1c694bae5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.261761 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "657c535b-7845-46e6-8a0e-57a1c694bae5" (UID: "657c535b-7845-46e6-8a0e-57a1c694bae5"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.329118 5133 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.329150 5133 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.329161 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.329172 5133 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/657c535b-7845-46e6-8a0e-57a1c694bae5-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.329184 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnxcq\" (UniqueName: \"kubernetes.io/projected/657c535b-7845-46e6-8a0e-57a1c694bae5-kube-api-access-bnxcq\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.362067 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "657c535b-7845-46e6-8a0e-57a1c694bae5" (UID: "657c535b-7845-46e6-8a0e-57a1c694bae5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.373934 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-59475798db-b6bck" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.383843 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"657c535b-7845-46e6-8a0e-57a1c694bae5","Type":"ContainerDied","Data":"8d5098396646a96925e2067fbbfaee0d388e4d40228da697732a02131822fdc6"} Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.383894 5133 scope.go:117] "RemoveContainer" containerID="b3aca601ae498023a91992a1c5d57804628421c59fc9f7347650c01a486e6357" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.384783 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.415619 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-59475798db-b6bck" event={"ID":"b5beef58-6317-4739-9049-bb385e29db6c","Type":"ContainerDied","Data":"2c559cc42e14a3c1cf6b828f27da77ed04fc9138115b34056acaf949e64bb8d4"} Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.415721 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-59475798db-b6bck" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.434369 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-config\") pod \"b5beef58-6317-4739-9049-bb385e29db6c\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.434444 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-httpd-config\") pod \"b5beef58-6317-4739-9049-bb385e29db6c\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.434577 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6vg9\" (UniqueName: \"kubernetes.io/projected/b5beef58-6317-4739-9049-bb385e29db6c-kube-api-access-k6vg9\") pod \"b5beef58-6317-4739-9049-bb385e29db6c\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.434625 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-combined-ca-bundle\") pod \"b5beef58-6317-4739-9049-bb385e29db6c\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.434693 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-ovndb-tls-certs\") pod \"b5beef58-6317-4739-9049-bb385e29db6c\" (UID: \"b5beef58-6317-4739-9049-bb385e29db6c\") " Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.435092 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.452030 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5beef58-6317-4739-9049-bb385e29db6c-kube-api-access-k6vg9" (OuterVolumeSpecName: "kube-api-access-k6vg9") pod "b5beef58-6317-4739-9049-bb385e29db6c" (UID: "b5beef58-6317-4739-9049-bb385e29db6c"). InnerVolumeSpecName "kube-api-access-k6vg9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.470644 5133 scope.go:117] "RemoveContainer" containerID="091d4de167f9c4af53e8fb653a12c6c5e3b54a02bc7bccead310be9d9ebdae72" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.528743 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "b5beef58-6317-4739-9049-bb385e29db6c" (UID: "b5beef58-6317-4739-9049-bb385e29db6c"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.529062 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-config-data" (OuterVolumeSpecName: "config-data") pod "657c535b-7845-46e6-8a0e-57a1c694bae5" (UID: "657c535b-7845-46e6-8a0e-57a1c694bae5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.553972 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.513659937 podStartE2EDuration="14.553949228s" podCreationTimestamp="2025-11-21 14:04:54 +0000 UTC" firstStartedPulling="2025-11-21 14:04:55.730194895 +0000 UTC m=+1355.528027133" lastFinishedPulling="2025-11-21 14:05:07.770484176 +0000 UTC m=+1367.568316424" observedRunningTime="2025-11-21 14:05:08.551463152 +0000 UTC m=+1368.349295400" watchObservedRunningTime="2025-11-21 14:05:08.553949228 +0000 UTC m=+1368.351781476" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.554331 5133 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.557519 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6vg9\" (UniqueName: \"kubernetes.io/projected/b5beef58-6317-4739-9049-bb385e29db6c-kube-api-access-k6vg9\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.557555 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/657c535b-7845-46e6-8a0e-57a1c694bae5-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.557823 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"d2c71aef-bcfe-42ba-8fcc-4fd36400f190","Type":"ContainerStarted","Data":"04ae912336d8fe816e717131605d57fcbcbfc45c928953510804f49c5d1c5243"} Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.560604 5133 scope.go:117] "RemoveContainer" containerID="9ede2213806aed9f825688e4b9df1ba079e12cc270e67531b1cc8d05ee90c1e6" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.575026 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5beef58-6317-4739-9049-bb385e29db6c" (UID: "b5beef58-6317-4739-9049-bb385e29db6c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.581484 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-config" (OuterVolumeSpecName: "config") pod "b5beef58-6317-4739-9049-bb385e29db6c" (UID: "b5beef58-6317-4739-9049-bb385e29db6c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.605514 5133 scope.go:117] "RemoveContainer" containerID="58068de407f13a486f898dc36f46c1891b1001d709fc556da3fdc711a643b582" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.625980 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "b5beef58-6317-4739-9049-bb385e29db6c" (UID: "b5beef58-6317-4739-9049-bb385e29db6c"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.659270 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.659311 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.659324 5133 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5beef58-6317-4739-9049-bb385e29db6c-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.752792 5133 scope.go:117] "RemoveContainer" containerID="4b6e120e604fc4bba3789e42c9c03aa4b1991707de3c88953884d2eb05c1dcd3" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.773542 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.777380 5133 scope.go:117] "RemoveContainer" containerID="7d1cd23636e4324c731842ba619c6d612dc2d2f953197b3d531e19c7fe285c43" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.790427 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.800148 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-59475798db-b6bck"] Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.809049 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-59475798db-b6bck"] Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.817387 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:08 crc kubenswrapper[5133]: E1121 14:05:08.817857 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api-log" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.817871 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api-log" Nov 21 14:05:08 crc kubenswrapper[5133]: E1121 14:05:08.817884 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="ceilometer-central-agent" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.817890 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="ceilometer-central-agent" Nov 21 14:05:08 crc kubenswrapper[5133]: E1121 14:05:08.817902 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.817908 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api" Nov 21 14:05:08 crc kubenswrapper[5133]: E1121 14:05:08.817921 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="sg-core" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.817927 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="sg-core" Nov 21 
14:05:08 crc kubenswrapper[5133]: E1121 14:05:08.817937 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="proxy-httpd" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.817942 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="proxy-httpd" Nov 21 14:05:08 crc kubenswrapper[5133]: E1121 14:05:08.817951 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5beef58-6317-4739-9049-bb385e29db6c" containerName="neutron-httpd" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.817957 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5beef58-6317-4739-9049-bb385e29db6c" containerName="neutron-httpd" Nov 21 14:05:08 crc kubenswrapper[5133]: E1121 14:05:08.817970 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5beef58-6317-4739-9049-bb385e29db6c" containerName="neutron-api" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.817977 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5beef58-6317-4739-9049-bb385e29db6c" containerName="neutron-api" Nov 21 14:05:08 crc kubenswrapper[5133]: E1121 14:05:08.817990 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="ceilometer-notification-agent" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.818010 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="ceilometer-notification-agent" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.818163 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api-log" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.818175 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b99cb93-e45a-4c5a-a449-eafa3241be56" containerName="barbican-api" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.818188 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5beef58-6317-4739-9049-bb385e29db6c" containerName="neutron-api" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.818198 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="ceilometer-central-agent" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.818210 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="ceilometer-notification-agent" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.818221 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="sg-core" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.818232 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5beef58-6317-4739-9049-bb385e29db6c" containerName="neutron-httpd" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.818241 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" containerName="proxy-httpd" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.819848 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.822512 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.822743 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.839702 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.964183 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-scripts\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.964232 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-run-httpd\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.964294 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8b9p\" (UniqueName: \"kubernetes.io/projected/b7aa32bb-3ee7-4954-906d-37262ff4637f-kube-api-access-x8b9p\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.964348 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-log-httpd\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.964393 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-config-data\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.964429 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:08 crc kubenswrapper[5133]: I1121 14:05:08.964453 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.065926 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-log-httpd\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.066121 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-config-data\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.066153 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.066167 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.066213 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-scripts\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.066239 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-run-httpd\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.066269 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8b9p\" (UniqueName: \"kubernetes.io/projected/b7aa32bb-3ee7-4954-906d-37262ff4637f-kube-api-access-x8b9p\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.066468 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-log-httpd\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.067060 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-run-httpd\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.071714 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.072578 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-scripts\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.074358 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-config-data\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.076842 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.085697 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8b9p\" (UniqueName: \"kubernetes.io/projected/b7aa32bb-3ee7-4954-906d-37262ff4637f-kube-api-access-x8b9p\") pod \"ceilometer-0\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.151809 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:05:09 crc kubenswrapper[5133]: I1121 14:05:09.725047 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:10 crc kubenswrapper[5133]: I1121 14:05:10.171971 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:10 crc kubenswrapper[5133]: I1121 14:05:10.469144 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="657c535b-7845-46e6-8a0e-57a1c694bae5" path="/var/lib/kubelet/pods/657c535b-7845-46e6-8a0e-57a1c694bae5/volumes" Nov 21 14:05:10 crc kubenswrapper[5133]: I1121 14:05:10.470363 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5beef58-6317-4739-9049-bb385e29db6c" path="/var/lib/kubelet/pods/b5beef58-6317-4739-9049-bb385e29db6c/volumes" Nov 21 14:05:10 crc kubenswrapper[5133]: I1121 14:05:10.506341 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerStarted","Data":"4aa96e95e574b8b2ba92c0677b3baeb7bf0aec250e6b6df4d57d2458805e43b6"} Nov 21 14:05:10 crc kubenswrapper[5133]: I1121 14:05:10.506397 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerStarted","Data":"b1e7a41ebf5af0cc12f8c5bea1f7f010015cd11245ac3cbff95ef69ec68b0676"} Nov 21 14:05:11 crc kubenswrapper[5133]: I1121 14:05:11.516786 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerStarted","Data":"614cefaf7e9736265ec1c4efaaea778369e6944c8be9c665fe3c9a6ba80a94ca"} Nov 21 14:05:13 crc kubenswrapper[5133]: I1121 14:05:13.558981 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerStarted","Data":"aaf6cbe432d492391858df4add3481ecbb299d8cb9f0960501f0f1e9e3a0b68a"} Nov 21 14:05:14 crc kubenswrapper[5133]: I1121 14:05:14.578717 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerStarted","Data":"d607039cd6e336e626c7f791fa4173ea4eb51388ee4732d871920d842ca51da0"} Nov 21 14:05:14 crc kubenswrapper[5133]: I1121 14:05:14.579793 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 
14:05:14 crc kubenswrapper[5133]: I1121 14:05:14.579707 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="proxy-httpd" containerID="cri-o://d607039cd6e336e626c7f791fa4173ea4eb51388ee4732d871920d842ca51da0" gracePeriod=30 Nov 21 14:05:14 crc kubenswrapper[5133]: I1121 14:05:14.579175 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="ceilometer-central-agent" containerID="cri-o://4aa96e95e574b8b2ba92c0677b3baeb7bf0aec250e6b6df4d57d2458805e43b6" gracePeriod=30 Nov 21 14:05:14 crc kubenswrapper[5133]: I1121 14:05:14.579733 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="ceilometer-notification-agent" containerID="cri-o://614cefaf7e9736265ec1c4efaaea778369e6944c8be9c665fe3c9a6ba80a94ca" gracePeriod=30 Nov 21 14:05:14 crc kubenswrapper[5133]: I1121 14:05:14.579723 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="sg-core" containerID="cri-o://aaf6cbe432d492391858df4add3481ecbb299d8cb9f0960501f0f1e9e3a0b68a" gracePeriod=30 Nov 21 14:05:15 crc kubenswrapper[5133]: I1121 14:05:15.593290 5133 generic.go:334] "Generic (PLEG): container finished" podID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerID="aaf6cbe432d492391858df4add3481ecbb299d8cb9f0960501f0f1e9e3a0b68a" exitCode=2 Nov 21 14:05:15 crc kubenswrapper[5133]: I1121 14:05:15.593678 5133 generic.go:334] "Generic (PLEG): container finished" podID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerID="614cefaf7e9736265ec1c4efaaea778369e6944c8be9c665fe3c9a6ba80a94ca" exitCode=0 Nov 21 14:05:15 crc kubenswrapper[5133]: I1121 14:05:15.593360 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerDied","Data":"aaf6cbe432d492391858df4add3481ecbb299d8cb9f0960501f0f1e9e3a0b68a"} Nov 21 14:05:15 crc kubenswrapper[5133]: I1121 14:05:15.593719 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerDied","Data":"614cefaf7e9736265ec1c4efaaea778369e6944c8be9c665fe3c9a6ba80a94ca"} Nov 21 14:05:18 crc kubenswrapper[5133]: I1121 14:05:18.622573 5133 generic.go:334] "Generic (PLEG): container finished" podID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerID="4aa96e95e574b8b2ba92c0677b3baeb7bf0aec250e6b6df4d57d2458805e43b6" exitCode=0 Nov 21 14:05:18 crc kubenswrapper[5133]: I1121 14:05:18.622630 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerDied","Data":"4aa96e95e574b8b2ba92c0677b3baeb7bf0aec250e6b6df4d57d2458805e43b6"} Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.785818 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=9.251493532 podStartE2EDuration="13.785791809s" podCreationTimestamp="2025-11-21 14:05:08 +0000 UTC" firstStartedPulling="2025-11-21 14:05:09.732496451 +0000 UTC m=+1369.530328699" lastFinishedPulling="2025-11-21 14:05:14.266794728 +0000 UTC m=+1374.064626976" observedRunningTime="2025-11-21 14:05:14.627722196 +0000 UTC 
m=+1374.425554444" watchObservedRunningTime="2025-11-21 14:05:21.785791809 +0000 UTC m=+1381.583624057" Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.790972 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-wdldd"] Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.792306 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.800042 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-wdldd"] Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.895702 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-p6q9z"] Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.897080 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.907710 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gskk5\" (UniqueName: \"kubernetes.io/projected/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-kube-api-access-gskk5\") pod \"nova-api-db-create-wdldd\" (UID: \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\") " pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.907930 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-operator-scripts\") pod \"nova-api-db-create-wdldd\" (UID: \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\") " pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.918529 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-p6q9z"] Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.932695 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-128b-account-create-rbwdb"] Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.934141 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.938068 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 21 14:05:21 crc kubenswrapper[5133]: I1121 14:05:21.967880 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-128b-account-create-rbwdb"] Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.010968 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-operator-scripts\") pod \"nova-api-db-create-wdldd\" (UID: \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\") " pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.011081 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gskk5\" (UniqueName: \"kubernetes.io/projected/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-kube-api-access-gskk5\") pod \"nova-api-db-create-wdldd\" (UID: \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\") " pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.011119 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bf540e-b52e-48fa-8c2d-2b95483089c4-operator-scripts\") pod \"nova-api-128b-account-create-rbwdb\" (UID: \"07bf540e-b52e-48fa-8c2d-2b95483089c4\") " pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.011144 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8skkg\" (UniqueName: \"kubernetes.io/projected/7f29a717-fad3-49bb-96bb-6d753e70d414-kube-api-access-8skkg\") pod \"nova-cell0-db-create-p6q9z\" (UID: \"7f29a717-fad3-49bb-96bb-6d753e70d414\") " pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.011161 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqr4s\" (UniqueName: \"kubernetes.io/projected/07bf540e-b52e-48fa-8c2d-2b95483089c4-kube-api-access-vqr4s\") pod \"nova-api-128b-account-create-rbwdb\" (UID: \"07bf540e-b52e-48fa-8c2d-2b95483089c4\") " pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.011181 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f29a717-fad3-49bb-96bb-6d753e70d414-operator-scripts\") pod \"nova-cell0-db-create-p6q9z\" (UID: \"7f29a717-fad3-49bb-96bb-6d753e70d414\") " pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.012036 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-operator-scripts\") pod \"nova-api-db-create-wdldd\" (UID: \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\") " pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.016218 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-b887d"] Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.017466 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.024929 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-b887d"] Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.046313 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gskk5\" (UniqueName: \"kubernetes.io/projected/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-kube-api-access-gskk5\") pod \"nova-api-db-create-wdldd\" (UID: \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\") " pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.097056 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-ad6a-account-create-5whkz"] Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.098753 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.102579 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.111423 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ad6a-account-create-5whkz"] Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.111506 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.113095 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc741b72-a799-418c-8071-12d8027de48a-operator-scripts\") pod \"nova-cell1-db-create-b887d\" (UID: \"bc741b72-a799-418c-8071-12d8027de48a\") " pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.113192 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nvpv\" (UniqueName: \"kubernetes.io/projected/bc741b72-a799-418c-8071-12d8027de48a-kube-api-access-7nvpv\") pod \"nova-cell1-db-create-b887d\" (UID: \"bc741b72-a799-418c-8071-12d8027de48a\") " pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.113223 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bf540e-b52e-48fa-8c2d-2b95483089c4-operator-scripts\") pod \"nova-api-128b-account-create-rbwdb\" (UID: \"07bf540e-b52e-48fa-8c2d-2b95483089c4\") " pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.113466 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8skkg\" (UniqueName: \"kubernetes.io/projected/7f29a717-fad3-49bb-96bb-6d753e70d414-kube-api-access-8skkg\") pod \"nova-cell0-db-create-p6q9z\" (UID: \"7f29a717-fad3-49bb-96bb-6d753e70d414\") " pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.113492 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqr4s\" (UniqueName: \"kubernetes.io/projected/07bf540e-b52e-48fa-8c2d-2b95483089c4-kube-api-access-vqr4s\") pod \"nova-api-128b-account-create-rbwdb\" (UID: \"07bf540e-b52e-48fa-8c2d-2b95483089c4\") " pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 
14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.113523 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f29a717-fad3-49bb-96bb-6d753e70d414-operator-scripts\") pod \"nova-cell0-db-create-p6q9z\" (UID: \"7f29a717-fad3-49bb-96bb-6d753e70d414\") " pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.114367 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f29a717-fad3-49bb-96bb-6d753e70d414-operator-scripts\") pod \"nova-cell0-db-create-p6q9z\" (UID: \"7f29a717-fad3-49bb-96bb-6d753e70d414\") " pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.114423 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bf540e-b52e-48fa-8c2d-2b95483089c4-operator-scripts\") pod \"nova-api-128b-account-create-rbwdb\" (UID: \"07bf540e-b52e-48fa-8c2d-2b95483089c4\") " pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.155621 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqr4s\" (UniqueName: \"kubernetes.io/projected/07bf540e-b52e-48fa-8c2d-2b95483089c4-kube-api-access-vqr4s\") pod \"nova-api-128b-account-create-rbwdb\" (UID: \"07bf540e-b52e-48fa-8c2d-2b95483089c4\") " pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.159694 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8skkg\" (UniqueName: \"kubernetes.io/projected/7f29a717-fad3-49bb-96bb-6d753e70d414-kube-api-access-8skkg\") pod \"nova-cell0-db-create-p6q9z\" (UID: \"7f29a717-fad3-49bb-96bb-6d753e70d414\") " pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.218590 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc741b72-a799-418c-8071-12d8027de48a-operator-scripts\") pod \"nova-cell1-db-create-b887d\" (UID: \"bc741b72-a799-418c-8071-12d8027de48a\") " pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.219201 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9efa168-aaaa-43e7-8cce-6f6797585494-operator-scripts\") pod \"nova-cell0-ad6a-account-create-5whkz\" (UID: \"c9efa168-aaaa-43e7-8cce-6f6797585494\") " pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.219359 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nvpv\" (UniqueName: \"kubernetes.io/projected/bc741b72-a799-418c-8071-12d8027de48a-kube-api-access-7nvpv\") pod \"nova-cell1-db-create-b887d\" (UID: \"bc741b72-a799-418c-8071-12d8027de48a\") " pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.219408 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvl68\" (UniqueName: \"kubernetes.io/projected/c9efa168-aaaa-43e7-8cce-6f6797585494-kube-api-access-pvl68\") pod \"nova-cell0-ad6a-account-create-5whkz\" (UID: 
\"c9efa168-aaaa-43e7-8cce-6f6797585494\") " pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.220359 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc741b72-a799-418c-8071-12d8027de48a-operator-scripts\") pod \"nova-cell1-db-create-b887d\" (UID: \"bc741b72-a799-418c-8071-12d8027de48a\") " pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.229748 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.246218 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nvpv\" (UniqueName: \"kubernetes.io/projected/bc741b72-a799-418c-8071-12d8027de48a-kube-api-access-7nvpv\") pod \"nova-cell1-db-create-b887d\" (UID: \"bc741b72-a799-418c-8071-12d8027de48a\") " pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:22 crc kubenswrapper[5133]: I1121 14:05:22.254455 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.317941 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-d999-account-create-5tk8j"] Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.321302 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9efa168-aaaa-43e7-8cce-6f6797585494-operator-scripts\") pod \"nova-cell0-ad6a-account-create-5whkz\" (UID: \"c9efa168-aaaa-43e7-8cce-6f6797585494\") " pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.321793 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvl68\" (UniqueName: \"kubernetes.io/projected/c9efa168-aaaa-43e7-8cce-6f6797585494-kube-api-access-pvl68\") pod \"nova-cell0-ad6a-account-create-5whkz\" (UID: \"c9efa168-aaaa-43e7-8cce-6f6797585494\") " pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.322664 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.322976 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9efa168-aaaa-43e7-8cce-6f6797585494-operator-scripts\") pod \"nova-cell0-ad6a-account-create-5whkz\" (UID: \"c9efa168-aaaa-43e7-8cce-6f6797585494\") " pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.327673 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.336357 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.337102 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d999-account-create-5tk8j"] Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.344171 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvl68\" (UniqueName: \"kubernetes.io/projected/c9efa168-aaaa-43e7-8cce-6f6797585494-kube-api-access-pvl68\") pod \"nova-cell0-ad6a-account-create-5whkz\" (UID: \"c9efa168-aaaa-43e7-8cce-6f6797585494\") " pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.423917 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a65a32c-3d97-4037-805b-07a5236fbaae-operator-scripts\") pod \"nova-cell1-d999-account-create-5tk8j\" (UID: \"1a65a32c-3d97-4037-805b-07a5236fbaae\") " pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.424093 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhm8j\" (UniqueName: \"kubernetes.io/projected/1a65a32c-3d97-4037-805b-07a5236fbaae-kube-api-access-jhm8j\") pod \"nova-cell1-d999-account-create-5tk8j\" (UID: \"1a65a32c-3d97-4037-805b-07a5236fbaae\") " pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.424448 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.525598 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhm8j\" (UniqueName: \"kubernetes.io/projected/1a65a32c-3d97-4037-805b-07a5236fbaae-kube-api-access-jhm8j\") pod \"nova-cell1-d999-account-create-5tk8j\" (UID: \"1a65a32c-3d97-4037-805b-07a5236fbaae\") " pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.525841 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a65a32c-3d97-4037-805b-07a5236fbaae-operator-scripts\") pod \"nova-cell1-d999-account-create-5tk8j\" (UID: \"1a65a32c-3d97-4037-805b-07a5236fbaae\") " pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.527845 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a65a32c-3d97-4037-805b-07a5236fbaae-operator-scripts\") pod \"nova-cell1-d999-account-create-5tk8j\" (UID: \"1a65a32c-3d97-4037-805b-07a5236fbaae\") " pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.557399 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhm8j\" (UniqueName: \"kubernetes.io/projected/1a65a32c-3d97-4037-805b-07a5236fbaae-kube-api-access-jhm8j\") pod \"nova-cell1-d999-account-create-5tk8j\" (UID: \"1a65a32c-3d97-4037-805b-07a5236fbaae\") " pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:22.690558 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.314249 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-wdldd"] Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.444851 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-p6q9z"] Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.449587 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-ad6a-account-create-5whkz"] Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.469332 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-d999-account-create-5tk8j"] Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.492462 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-128b-account-create-rbwdb"] Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.495636 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 21 14:05:23 crc kubenswrapper[5133]: W1121 14:05:23.500086 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07bf540e_b52e_48fa_8c2d_2b95483089c4.slice/crio-8a2cb5d6913c8359880ed23ac08f7417e27636bbc413ca7682364bd24234df4d WatchSource:0}: Error finding container 8a2cb5d6913c8359880ed23ac08f7417e27636bbc413ca7682364bd24234df4d: Status 404 returned error can't find the container with id 8a2cb5d6913c8359880ed23ac08f7417e27636bbc413ca7682364bd24234df4d Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.516585 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-b887d"] Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.563085 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.682472 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-p6q9z" event={"ID":"7f29a717-fad3-49bb-96bb-6d753e70d414","Type":"ContainerStarted","Data":"ad4ae43840ccdbb61fa73aa64564f52465e840fd6f7567a847da0890e0a8f6c9"} Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.686547 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d999-account-create-5tk8j" event={"ID":"1a65a32c-3d97-4037-805b-07a5236fbaae","Type":"ContainerStarted","Data":"fa3f4805e2975b07077316fa2213305e0758e039531329a55b3f394a9ab0c863"} Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.688748 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wdldd" event={"ID":"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01","Type":"ContainerStarted","Data":"4b59e8149c82d8cc93880a618af3bf50d2a740da199903690edf595cf288f49b"} Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.688798 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wdldd" event={"ID":"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01","Type":"ContainerStarted","Data":"198b419d30d030704e1b7ce10f6d9893080662c23ed060876212aff369e6b485"} Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.693284 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ad6a-account-create-5whkz" event={"ID":"c9efa168-aaaa-43e7-8cce-6f6797585494","Type":"ContainerStarted","Data":"3c2f84051699ec6dcd6242154af2955c96dc810dc24dae25640c56ba69810b96"} Nov 21 14:05:23 crc 
kubenswrapper[5133]: I1121 14:05:23.697309 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-128b-account-create-rbwdb" event={"ID":"07bf540e-b52e-48fa-8c2d-2b95483089c4","Type":"ContainerStarted","Data":"8a2cb5d6913c8359880ed23ac08f7417e27636bbc413ca7682364bd24234df4d"} Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.699692 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-b887d" event={"ID":"bc741b72-a799-418c-8071-12d8027de48a","Type":"ContainerStarted","Data":"29a523cc51337b44a533368a7f614c2a02e984e7569c5dab24cd55e4f89297ed"} Nov 21 14:05:23 crc kubenswrapper[5133]: I1121 14:05:23.712416 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-wdldd" podStartSLOduration=2.712394108 podStartE2EDuration="2.712394108s" podCreationTimestamp="2025-11-21 14:05:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:05:23.707089506 +0000 UTC m=+1383.504921774" watchObservedRunningTime="2025-11-21 14:05:23.712394108 +0000 UTC m=+1383.510226356" Nov 21 14:05:24 crc kubenswrapper[5133]: E1121 14:05:24.210065 5133 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a65a32c_3d97_4037_805b_07a5236fbaae.slice/crio-66a435a0ab3d27cf95142f55dde169936e4fc6034470dd3e5a5c8401d6cd6a5c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7f29a717_fad3_49bb_96bb_6d753e70d414.slice/crio-85feb45cdd702ace893e626bea50249d4b3be788fd6fc9c099415fcba60b41f5.scope\": RecentStats: unable to find data in memory cache]" Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.710232 5133 generic.go:334] "Generic (PLEG): container finished" podID="1a65a32c-3d97-4037-805b-07a5236fbaae" containerID="66a435a0ab3d27cf95142f55dde169936e4fc6034470dd3e5a5c8401d6cd6a5c" exitCode=0 Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.710370 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d999-account-create-5tk8j" event={"ID":"1a65a32c-3d97-4037-805b-07a5236fbaae","Type":"ContainerDied","Data":"66a435a0ab3d27cf95142f55dde169936e4fc6034470dd3e5a5c8401d6cd6a5c"} Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.717627 5133 generic.go:334] "Generic (PLEG): container finished" podID="c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01" containerID="4b59e8149c82d8cc93880a618af3bf50d2a740da199903690edf595cf288f49b" exitCode=0 Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.717903 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wdldd" event={"ID":"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01","Type":"ContainerDied","Data":"4b59e8149c82d8cc93880a618af3bf50d2a740da199903690edf595cf288f49b"} Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.719710 5133 generic.go:334] "Generic (PLEG): container finished" podID="c9efa168-aaaa-43e7-8cce-6f6797585494" containerID="4b91809b0e979932e9f078f62f8ba8bca15983a68e9e784a3727a3081ba8e8ca" exitCode=0 Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.719755 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ad6a-account-create-5whkz" event={"ID":"c9efa168-aaaa-43e7-8cce-6f6797585494","Type":"ContainerDied","Data":"4b91809b0e979932e9f078f62f8ba8bca15983a68e9e784a3727a3081ba8e8ca"} 
Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.720986 5133 generic.go:334] "Generic (PLEG): container finished" podID="07bf540e-b52e-48fa-8c2d-2b95483089c4" containerID="6ea3607fd21d89052b574ed3d55d0e742377c327e98bbdd13b81ee269d042098" exitCode=0 Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.721071 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-128b-account-create-rbwdb" event={"ID":"07bf540e-b52e-48fa-8c2d-2b95483089c4","Type":"ContainerDied","Data":"6ea3607fd21d89052b574ed3d55d0e742377c327e98bbdd13b81ee269d042098"} Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.722548 5133 generic.go:334] "Generic (PLEG): container finished" podID="bc741b72-a799-418c-8071-12d8027de48a" containerID="8e1c368d98e96f298e4e9df9f519f49757c7ef2a1a422bfcfaf9033f3a4bc012" exitCode=0 Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.722593 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-b887d" event={"ID":"bc741b72-a799-418c-8071-12d8027de48a","Type":"ContainerDied","Data":"8e1c368d98e96f298e4e9df9f519f49757c7ef2a1a422bfcfaf9033f3a4bc012"} Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.723919 5133 generic.go:334] "Generic (PLEG): container finished" podID="7f29a717-fad3-49bb-96bb-6d753e70d414" containerID="85feb45cdd702ace893e626bea50249d4b3be788fd6fc9c099415fcba60b41f5" exitCode=0 Nov 21 14:05:24 crc kubenswrapper[5133]: I1121 14:05:24.723954 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-p6q9z" event={"ID":"7f29a717-fad3-49bb-96bb-6d753e70d414","Type":"ContainerDied","Data":"85feb45cdd702ace893e626bea50249d4b3be788fd6fc9c099415fcba60b41f5"} Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.168974 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.295769 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhm8j\" (UniqueName: \"kubernetes.io/projected/1a65a32c-3d97-4037-805b-07a5236fbaae-kube-api-access-jhm8j\") pod \"1a65a32c-3d97-4037-805b-07a5236fbaae\" (UID: \"1a65a32c-3d97-4037-805b-07a5236fbaae\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.295939 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a65a32c-3d97-4037-805b-07a5236fbaae-operator-scripts\") pod \"1a65a32c-3d97-4037-805b-07a5236fbaae\" (UID: \"1a65a32c-3d97-4037-805b-07a5236fbaae\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.298560 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a65a32c-3d97-4037-805b-07a5236fbaae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1a65a32c-3d97-4037-805b-07a5236fbaae" (UID: "1a65a32c-3d97-4037-805b-07a5236fbaae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.310182 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a65a32c-3d97-4037-805b-07a5236fbaae-kube-api-access-jhm8j" (OuterVolumeSpecName: "kube-api-access-jhm8j") pod "1a65a32c-3d97-4037-805b-07a5236fbaae" (UID: "1a65a32c-3d97-4037-805b-07a5236fbaae"). InnerVolumeSpecName "kube-api-access-jhm8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.398484 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhm8j\" (UniqueName: \"kubernetes.io/projected/1a65a32c-3d97-4037-805b-07a5236fbaae-kube-api-access-jhm8j\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.398525 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1a65a32c-3d97-4037-805b-07a5236fbaae-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.434070 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.438431 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.453641 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.469270 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.488558 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608160 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvl68\" (UniqueName: \"kubernetes.io/projected/c9efa168-aaaa-43e7-8cce-6f6797585494-kube-api-access-pvl68\") pod \"c9efa168-aaaa-43e7-8cce-6f6797585494\" (UID: \"c9efa168-aaaa-43e7-8cce-6f6797585494\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608222 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqr4s\" (UniqueName: \"kubernetes.io/projected/07bf540e-b52e-48fa-8c2d-2b95483089c4-kube-api-access-vqr4s\") pod \"07bf540e-b52e-48fa-8c2d-2b95483089c4\" (UID: \"07bf540e-b52e-48fa-8c2d-2b95483089c4\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608293 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gskk5\" (UniqueName: \"kubernetes.io/projected/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-kube-api-access-gskk5\") pod \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\" (UID: \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608334 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9efa168-aaaa-43e7-8cce-6f6797585494-operator-scripts\") pod \"c9efa168-aaaa-43e7-8cce-6f6797585494\" (UID: \"c9efa168-aaaa-43e7-8cce-6f6797585494\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608371 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bf540e-b52e-48fa-8c2d-2b95483089c4-operator-scripts\") pod \"07bf540e-b52e-48fa-8c2d-2b95483089c4\" (UID: \"07bf540e-b52e-48fa-8c2d-2b95483089c4\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608405 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/7f29a717-fad3-49bb-96bb-6d753e70d414-operator-scripts\") pod \"7f29a717-fad3-49bb-96bb-6d753e70d414\" (UID: \"7f29a717-fad3-49bb-96bb-6d753e70d414\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608462 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nvpv\" (UniqueName: \"kubernetes.io/projected/bc741b72-a799-418c-8071-12d8027de48a-kube-api-access-7nvpv\") pod \"bc741b72-a799-418c-8071-12d8027de48a\" (UID: \"bc741b72-a799-418c-8071-12d8027de48a\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608486 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-operator-scripts\") pod \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\" (UID: \"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608516 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8skkg\" (UniqueName: \"kubernetes.io/projected/7f29a717-fad3-49bb-96bb-6d753e70d414-kube-api-access-8skkg\") pod \"7f29a717-fad3-49bb-96bb-6d753e70d414\" (UID: \"7f29a717-fad3-49bb-96bb-6d753e70d414\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.608575 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc741b72-a799-418c-8071-12d8027de48a-operator-scripts\") pod \"bc741b72-a799-418c-8071-12d8027de48a\" (UID: \"bc741b72-a799-418c-8071-12d8027de48a\") " Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.609251 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f29a717-fad3-49bb-96bb-6d753e70d414-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7f29a717-fad3-49bb-96bb-6d753e70d414" (UID: "7f29a717-fad3-49bb-96bb-6d753e70d414"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.609261 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01" (UID: "c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.609320 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07bf540e-b52e-48fa-8c2d-2b95483089c4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "07bf540e-b52e-48fa-8c2d-2b95483089c4" (UID: "07bf540e-b52e-48fa-8c2d-2b95483089c4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.610117 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc741b72-a799-418c-8071-12d8027de48a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bc741b72-a799-418c-8071-12d8027de48a" (UID: "bc741b72-a799-418c-8071-12d8027de48a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.610477 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9efa168-aaaa-43e7-8cce-6f6797585494-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c9efa168-aaaa-43e7-8cce-6f6797585494" (UID: "c9efa168-aaaa-43e7-8cce-6f6797585494"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.614228 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07bf540e-b52e-48fa-8c2d-2b95483089c4-kube-api-access-vqr4s" (OuterVolumeSpecName: "kube-api-access-vqr4s") pod "07bf540e-b52e-48fa-8c2d-2b95483089c4" (UID: "07bf540e-b52e-48fa-8c2d-2b95483089c4"). InnerVolumeSpecName "kube-api-access-vqr4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.614387 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc741b72-a799-418c-8071-12d8027de48a-kube-api-access-7nvpv" (OuterVolumeSpecName: "kube-api-access-7nvpv") pod "bc741b72-a799-418c-8071-12d8027de48a" (UID: "bc741b72-a799-418c-8071-12d8027de48a"). InnerVolumeSpecName "kube-api-access-7nvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.614546 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9efa168-aaaa-43e7-8cce-6f6797585494-kube-api-access-pvl68" (OuterVolumeSpecName: "kube-api-access-pvl68") pod "c9efa168-aaaa-43e7-8cce-6f6797585494" (UID: "c9efa168-aaaa-43e7-8cce-6f6797585494"). InnerVolumeSpecName "kube-api-access-pvl68". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.615052 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-kube-api-access-gskk5" (OuterVolumeSpecName: "kube-api-access-gskk5") pod "c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01" (UID: "c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01"). InnerVolumeSpecName "kube-api-access-gskk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.616638 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f29a717-fad3-49bb-96bb-6d753e70d414-kube-api-access-8skkg" (OuterVolumeSpecName: "kube-api-access-8skkg") pod "7f29a717-fad3-49bb-96bb-6d753e70d414" (UID: "7f29a717-fad3-49bb-96bb-6d753e70d414"). InnerVolumeSpecName "kube-api-access-8skkg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.710889 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gskk5\" (UniqueName: \"kubernetes.io/projected/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-kube-api-access-gskk5\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.710938 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9efa168-aaaa-43e7-8cce-6f6797585494-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.710949 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/07bf540e-b52e-48fa-8c2d-2b95483089c4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.710962 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f29a717-fad3-49bb-96bb-6d753e70d414-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.710973 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nvpv\" (UniqueName: \"kubernetes.io/projected/bc741b72-a799-418c-8071-12d8027de48a-kube-api-access-7nvpv\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.710981 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.710989 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8skkg\" (UniqueName: \"kubernetes.io/projected/7f29a717-fad3-49bb-96bb-6d753e70d414-kube-api-access-8skkg\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.711014 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc741b72-a799-418c-8071-12d8027de48a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.711025 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvl68\" (UniqueName: \"kubernetes.io/projected/c9efa168-aaaa-43e7-8cce-6f6797585494-kube-api-access-pvl68\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.711034 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqr4s\" (UniqueName: \"kubernetes.io/projected/07bf540e-b52e-48fa-8c2d-2b95483089c4-kube-api-access-vqr4s\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.742944 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-128b-account-create-rbwdb" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.742944 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-128b-account-create-rbwdb" event={"ID":"07bf540e-b52e-48fa-8c2d-2b95483089c4","Type":"ContainerDied","Data":"8a2cb5d6913c8359880ed23ac08f7417e27636bbc413ca7682364bd24234df4d"} Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.743070 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a2cb5d6913c8359880ed23ac08f7417e27636bbc413ca7682364bd24234df4d" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.745833 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-b887d" event={"ID":"bc741b72-a799-418c-8071-12d8027de48a","Type":"ContainerDied","Data":"29a523cc51337b44a533368a7f614c2a02e984e7569c5dab24cd55e4f89297ed"} Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.745873 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29a523cc51337b44a533368a7f614c2a02e984e7569c5dab24cd55e4f89297ed" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.745940 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-b887d" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.751667 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-p6q9z" event={"ID":"7f29a717-fad3-49bb-96bb-6d753e70d414","Type":"ContainerDied","Data":"ad4ae43840ccdbb61fa73aa64564f52465e840fd6f7567a847da0890e0a8f6c9"} Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.751698 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad4ae43840ccdbb61fa73aa64564f52465e840fd6f7567a847da0890e0a8f6c9" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.751766 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-p6q9z" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.754612 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-d999-account-create-5tk8j" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.754671 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-d999-account-create-5tk8j" event={"ID":"1a65a32c-3d97-4037-805b-07a5236fbaae","Type":"ContainerDied","Data":"fa3f4805e2975b07077316fa2213305e0758e039531329a55b3f394a9ab0c863"} Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.754734 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa3f4805e2975b07077316fa2213305e0758e039531329a55b3f394a9ab0c863" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.757478 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-wdldd" event={"ID":"c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01","Type":"ContainerDied","Data":"198b419d30d030704e1b7ce10f6d9893080662c23ed060876212aff369e6b485"} Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.757526 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="198b419d30d030704e1b7ce10f6d9893080662c23ed060876212aff369e6b485" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.757500 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-wdldd" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.760309 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-ad6a-account-create-5whkz" event={"ID":"c9efa168-aaaa-43e7-8cce-6f6797585494","Type":"ContainerDied","Data":"3c2f84051699ec6dcd6242154af2955c96dc810dc24dae25640c56ba69810b96"} Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.760358 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c2f84051699ec6dcd6242154af2955c96dc810dc24dae25640c56ba69810b96" Nov 21 14:05:26 crc kubenswrapper[5133]: I1121 14:05:26.760433 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-ad6a-account-create-5whkz" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.380146 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-97b9q"] Nov 21 14:05:32 crc kubenswrapper[5133]: E1121 14:05:32.382727 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc741b72-a799-418c-8071-12d8027de48a" containerName="mariadb-database-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.382751 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc741b72-a799-418c-8071-12d8027de48a" containerName="mariadb-database-create" Nov 21 14:05:32 crc kubenswrapper[5133]: E1121 14:05:32.382771 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a65a32c-3d97-4037-805b-07a5236fbaae" containerName="mariadb-account-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.382781 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a65a32c-3d97-4037-805b-07a5236fbaae" containerName="mariadb-account-create" Nov 21 14:05:32 crc kubenswrapper[5133]: E1121 14:05:32.382796 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9efa168-aaaa-43e7-8cce-6f6797585494" containerName="mariadb-account-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.382807 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9efa168-aaaa-43e7-8cce-6f6797585494" containerName="mariadb-account-create" Nov 21 14:05:32 crc kubenswrapper[5133]: E1121 14:05:32.382832 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07bf540e-b52e-48fa-8c2d-2b95483089c4" containerName="mariadb-account-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.382839 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="07bf540e-b52e-48fa-8c2d-2b95483089c4" containerName="mariadb-account-create" Nov 21 14:05:32 crc kubenswrapper[5133]: E1121 14:05:32.382850 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f29a717-fad3-49bb-96bb-6d753e70d414" containerName="mariadb-database-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.382858 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f29a717-fad3-49bb-96bb-6d753e70d414" containerName="mariadb-database-create" Nov 21 14:05:32 crc kubenswrapper[5133]: E1121 14:05:32.382869 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01" containerName="mariadb-database-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.382877 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01" containerName="mariadb-database-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.383155 5133 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="1a65a32c-3d97-4037-805b-07a5236fbaae" containerName="mariadb-account-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.383182 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01" containerName="mariadb-database-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.383201 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9efa168-aaaa-43e7-8cce-6f6797585494" containerName="mariadb-account-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.383213 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f29a717-fad3-49bb-96bb-6d753e70d414" containerName="mariadb-database-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.383222 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc741b72-a799-418c-8071-12d8027de48a" containerName="mariadb-database-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.383233 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="07bf540e-b52e-48fa-8c2d-2b95483089c4" containerName="mariadb-account-create" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.384257 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.388872 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.388899 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-q862r" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.389337 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.393754 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-97b9q"] Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.440893 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.441124 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-config-data\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.441155 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-scripts\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.441203 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpc4b\" (UniqueName: 
\"kubernetes.io/projected/1d6200bb-1d14-4055-b2e3-448d9e43254b-kube-api-access-gpc4b\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.542732 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-config-data\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.542808 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-scripts\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.542900 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpc4b\" (UniqueName: \"kubernetes.io/projected/1d6200bb-1d14-4055-b2e3-448d9e43254b-kube-api-access-gpc4b\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.542956 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.551989 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-config-data\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.552768 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-scripts\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.553061 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.566727 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpc4b\" (UniqueName: \"kubernetes.io/projected/1d6200bb-1d14-4055-b2e3-448d9e43254b-kube-api-access-gpc4b\") pod \"nova-cell0-conductor-db-sync-97b9q\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:32 crc kubenswrapper[5133]: I1121 14:05:32.715220 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:33 crc kubenswrapper[5133]: I1121 14:05:33.208613 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-97b9q"] Nov 21 14:05:33 crc kubenswrapper[5133]: I1121 14:05:33.866598 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-97b9q" event={"ID":"1d6200bb-1d14-4055-b2e3-448d9e43254b","Type":"ContainerStarted","Data":"42d676380876a5d7892b93263a6009ed38131047ad7ad3220fb1c14a6a85746e"} Nov 21 14:05:39 crc kubenswrapper[5133]: I1121 14:05:39.160159 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 21 14:05:39 crc kubenswrapper[5133]: I1121 14:05:39.943814 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-97b9q" event={"ID":"1d6200bb-1d14-4055-b2e3-448d9e43254b","Type":"ContainerStarted","Data":"958ef6915ad5e8b47995172884a4e2caea9f1e9225533aafc54e62a38b8b9e98"} Nov 21 14:05:39 crc kubenswrapper[5133]: I1121 14:05:39.984401 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-97b9q" podStartSLOduration=2.014053617 podStartE2EDuration="7.984367863s" podCreationTimestamp="2025-11-21 14:05:32 +0000 UTC" firstStartedPulling="2025-11-21 14:05:33.219104069 +0000 UTC m=+1393.016936317" lastFinishedPulling="2025-11-21 14:05:39.189418314 +0000 UTC m=+1398.987250563" observedRunningTime="2025-11-21 14:05:39.974495869 +0000 UTC m=+1399.772328117" watchObservedRunningTime="2025-11-21 14:05:39.984367863 +0000 UTC m=+1399.782200121" Nov 21 14:05:44 crc kubenswrapper[5133]: E1121 14:05:44.699511 5133 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7aa32bb_3ee7_4954_906d_37262ff4637f.slice/crio-conmon-d607039cd6e336e626c7f791fa4173ea4eb51388ee4732d871920d842ca51da0.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7aa32bb_3ee7_4954_906d_37262ff4637f.slice/crio-d607039cd6e336e626c7f791fa4173ea4eb51388ee4732d871920d842ca51da0.scope\": RecentStats: unable to find data in memory cache]" Nov 21 14:05:44 crc kubenswrapper[5133]: I1121 14:05:44.997581 5133 generic.go:334] "Generic (PLEG): container finished" podID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerID="d607039cd6e336e626c7f791fa4173ea4eb51388ee4732d871920d842ca51da0" exitCode=137 Nov 21 14:05:44 crc kubenswrapper[5133]: I1121 14:05:44.997656 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerDied","Data":"d607039cd6e336e626c7f791fa4173ea4eb51388ee4732d871920d842ca51da0"} Nov 21 14:05:44 crc kubenswrapper[5133]: I1121 14:05:44.997700 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b7aa32bb-3ee7-4954-906d-37262ff4637f","Type":"ContainerDied","Data":"b1e7a41ebf5af0cc12f8c5bea1f7f010015cd11245ac3cbff95ef69ec68b0676"} Nov 21 14:05:44 crc kubenswrapper[5133]: I1121 14:05:44.997723 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1e7a41ebf5af0cc12f8c5bea1f7f010015cd11245ac3cbff95ef69ec68b0676" Nov 21 
14:05:44 crc kubenswrapper[5133]: I1121 14:05:44.998903 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.105474 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-combined-ca-bundle\") pod \"b7aa32bb-3ee7-4954-906d-37262ff4637f\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.105787 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-run-httpd\") pod \"b7aa32bb-3ee7-4954-906d-37262ff4637f\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.105833 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8b9p\" (UniqueName: \"kubernetes.io/projected/b7aa32bb-3ee7-4954-906d-37262ff4637f-kube-api-access-x8b9p\") pod \"b7aa32bb-3ee7-4954-906d-37262ff4637f\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.106037 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-sg-core-conf-yaml\") pod \"b7aa32bb-3ee7-4954-906d-37262ff4637f\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.106064 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-scripts\") pod \"b7aa32bb-3ee7-4954-906d-37262ff4637f\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.106100 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-log-httpd\") pod \"b7aa32bb-3ee7-4954-906d-37262ff4637f\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.106126 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-config-data\") pod \"b7aa32bb-3ee7-4954-906d-37262ff4637f\" (UID: \"b7aa32bb-3ee7-4954-906d-37262ff4637f\") " Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.106349 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b7aa32bb-3ee7-4954-906d-37262ff4637f" (UID: "b7aa32bb-3ee7-4954-906d-37262ff4637f"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.106483 5133 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.106981 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b7aa32bb-3ee7-4954-906d-37262ff4637f" (UID: "b7aa32bb-3ee7-4954-906d-37262ff4637f"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.112409 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7aa32bb-3ee7-4954-906d-37262ff4637f-kube-api-access-x8b9p" (OuterVolumeSpecName: "kube-api-access-x8b9p") pod "b7aa32bb-3ee7-4954-906d-37262ff4637f" (UID: "b7aa32bb-3ee7-4954-906d-37262ff4637f"). InnerVolumeSpecName "kube-api-access-x8b9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.125174 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-scripts" (OuterVolumeSpecName: "scripts") pod "b7aa32bb-3ee7-4954-906d-37262ff4637f" (UID: "b7aa32bb-3ee7-4954-906d-37262ff4637f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.140250 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b7aa32bb-3ee7-4954-906d-37262ff4637f" (UID: "b7aa32bb-3ee7-4954-906d-37262ff4637f"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.194764 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b7aa32bb-3ee7-4954-906d-37262ff4637f" (UID: "b7aa32bb-3ee7-4954-906d-37262ff4637f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.208810 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.208850 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8b9p\" (UniqueName: \"kubernetes.io/projected/b7aa32bb-3ee7-4954-906d-37262ff4637f-kube-api-access-x8b9p\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.208869 5133 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.208880 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.208893 5133 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b7aa32bb-3ee7-4954-906d-37262ff4637f-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.220634 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-config-data" (OuterVolumeSpecName: "config-data") pod "b7aa32bb-3ee7-4954-906d-37262ff4637f" (UID: "b7aa32bb-3ee7-4954-906d-37262ff4637f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:45 crc kubenswrapper[5133]: I1121 14:05:45.311730 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7aa32bb-3ee7-4954-906d-37262ff4637f-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.008240 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.052704 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.065941 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.087543 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:46 crc kubenswrapper[5133]: E1121 14:05:46.088175 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="proxy-httpd" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.088207 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="proxy-httpd" Nov 21 14:05:46 crc kubenswrapper[5133]: E1121 14:05:46.088230 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="ceilometer-central-agent" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.088239 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="ceilometer-central-agent" Nov 21 14:05:46 crc kubenswrapper[5133]: E1121 14:05:46.088282 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="ceilometer-notification-agent" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.088291 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="ceilometer-notification-agent" Nov 21 14:05:46 crc kubenswrapper[5133]: E1121 14:05:46.088308 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="sg-core" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.088316 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="sg-core" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.088539 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="proxy-httpd" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.088565 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="ceilometer-central-agent" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.088577 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="sg-core" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.088602 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" containerName="ceilometer-notification-agent" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.090932 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.094701 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.094866 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.102712 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.230069 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.230151 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.230235 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blwcb\" (UniqueName: \"kubernetes.io/projected/cf456764-4242-4005-a375-6907d47a6e57-kube-api-access-blwcb\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.230350 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-run-httpd\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.230412 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-log-httpd\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.230514 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-scripts\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.230566 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-config-data\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.332310 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 
14:05:46.332419 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.332546 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blwcb\" (UniqueName: \"kubernetes.io/projected/cf456764-4242-4005-a375-6907d47a6e57-kube-api-access-blwcb\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.332659 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-run-httpd\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.332743 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-log-httpd\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.332896 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-scripts\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.332974 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-config-data\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.333773 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-run-httpd\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.334056 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-log-httpd\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.336989 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-scripts\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.337231 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.337522 5133 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.339756 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-config-data\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.367314 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blwcb\" (UniqueName: \"kubernetes.io/projected/cf456764-4242-4005-a375-6907d47a6e57-kube-api-access-blwcb\") pod \"ceilometer-0\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.414758 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.476626 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7aa32bb-3ee7-4954-906d-37262ff4637f" path="/var/lib/kubelet/pods/b7aa32bb-3ee7-4954-906d-37262ff4637f/volumes" Nov 21 14:05:46 crc kubenswrapper[5133]: W1121 14:05:46.861262 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf456764_4242_4005_a375_6907d47a6e57.slice/crio-8fe5d391d6d95072bfa6fb1d07f13bfdb550e674ff1726bb7bb6a81c379f164b WatchSource:0}: Error finding container 8fe5d391d6d95072bfa6fb1d07f13bfdb550e674ff1726bb7bb6a81c379f164b: Status 404 returned error can't find the container with id 8fe5d391d6d95072bfa6fb1d07f13bfdb550e674ff1726bb7bb6a81c379f164b Nov 21 14:05:46 crc kubenswrapper[5133]: I1121 14:05:46.870821 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:05:47 crc kubenswrapper[5133]: I1121 14:05:47.018881 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerStarted","Data":"8fe5d391d6d95072bfa6fb1d07f13bfdb550e674ff1726bb7bb6a81c379f164b"} Nov 21 14:05:48 crc kubenswrapper[5133]: I1121 14:05:48.041487 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerStarted","Data":"f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231"} Nov 21 14:05:49 crc kubenswrapper[5133]: I1121 14:05:49.055275 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerStarted","Data":"07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8"} Nov 21 14:05:49 crc kubenswrapper[5133]: I1121 14:05:49.055869 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerStarted","Data":"5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a"} Nov 21 14:05:51 crc kubenswrapper[5133]: I1121 14:05:51.074653 5133 generic.go:334] "Generic (PLEG): container finished" podID="1d6200bb-1d14-4055-b2e3-448d9e43254b" containerID="958ef6915ad5e8b47995172884a4e2caea9f1e9225533aafc54e62a38b8b9e98" exitCode=0 Nov 21 14:05:51 crc kubenswrapper[5133]: I1121 
14:05:51.074866 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-97b9q" event={"ID":"1d6200bb-1d14-4055-b2e3-448d9e43254b","Type":"ContainerDied","Data":"958ef6915ad5e8b47995172884a4e2caea9f1e9225533aafc54e62a38b8b9e98"} Nov 21 14:05:51 crc kubenswrapper[5133]: I1121 14:05:51.078926 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerStarted","Data":"cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187"} Nov 21 14:05:51 crc kubenswrapper[5133]: I1121 14:05:51.079274 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 14:05:51 crc kubenswrapper[5133]: I1121 14:05:51.117018 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.898718812 podStartE2EDuration="5.116970904s" podCreationTimestamp="2025-11-21 14:05:46 +0000 UTC" firstStartedPulling="2025-11-21 14:05:46.863811896 +0000 UTC m=+1406.661644144" lastFinishedPulling="2025-11-21 14:05:50.082063988 +0000 UTC m=+1409.879896236" observedRunningTime="2025-11-21 14:05:51.109169186 +0000 UTC m=+1410.907001444" watchObservedRunningTime="2025-11-21 14:05:51.116970904 +0000 UTC m=+1410.914803152" Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.484559 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.658676 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gpc4b\" (UniqueName: \"kubernetes.io/projected/1d6200bb-1d14-4055-b2e3-448d9e43254b-kube-api-access-gpc4b\") pod \"1d6200bb-1d14-4055-b2e3-448d9e43254b\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.659039 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-config-data\") pod \"1d6200bb-1d14-4055-b2e3-448d9e43254b\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.659188 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-combined-ca-bundle\") pod \"1d6200bb-1d14-4055-b2e3-448d9e43254b\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.659321 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-scripts\") pod \"1d6200bb-1d14-4055-b2e3-448d9e43254b\" (UID: \"1d6200bb-1d14-4055-b2e3-448d9e43254b\") " Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.666584 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d6200bb-1d14-4055-b2e3-448d9e43254b-kube-api-access-gpc4b" (OuterVolumeSpecName: "kube-api-access-gpc4b") pod "1d6200bb-1d14-4055-b2e3-448d9e43254b" (UID: "1d6200bb-1d14-4055-b2e3-448d9e43254b"). InnerVolumeSpecName "kube-api-access-gpc4b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.667544 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-scripts" (OuterVolumeSpecName: "scripts") pod "1d6200bb-1d14-4055-b2e3-448d9e43254b" (UID: "1d6200bb-1d14-4055-b2e3-448d9e43254b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.692499 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1d6200bb-1d14-4055-b2e3-448d9e43254b" (UID: "1d6200bb-1d14-4055-b2e3-448d9e43254b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.712984 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-config-data" (OuterVolumeSpecName: "config-data") pod "1d6200bb-1d14-4055-b2e3-448d9e43254b" (UID: "1d6200bb-1d14-4055-b2e3-448d9e43254b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.762123 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.762168 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gpc4b\" (UniqueName: \"kubernetes.io/projected/1d6200bb-1d14-4055-b2e3-448d9e43254b-kube-api-access-gpc4b\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.762184 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:52 crc kubenswrapper[5133]: I1121 14:05:52.762198 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d6200bb-1d14-4055-b2e3-448d9e43254b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.108792 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-97b9q" event={"ID":"1d6200bb-1d14-4055-b2e3-448d9e43254b","Type":"ContainerDied","Data":"42d676380876a5d7892b93263a6009ed38131047ad7ad3220fb1c14a6a85746e"} Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.109237 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42d676380876a5d7892b93263a6009ed38131047ad7ad3220fb1c14a6a85746e" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.108904 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-97b9q" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.211717 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 21 14:05:53 crc kubenswrapper[5133]: E1121 14:05:53.212188 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d6200bb-1d14-4055-b2e3-448d9e43254b" containerName="nova-cell0-conductor-db-sync" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.212207 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d6200bb-1d14-4055-b2e3-448d9e43254b" containerName="nova-cell0-conductor-db-sync" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.212429 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1d6200bb-1d14-4055-b2e3-448d9e43254b" containerName="nova-cell0-conductor-db-sync" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.213037 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.215657 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.215992 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-q862r" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.234690 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.310496 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.310567 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.374010 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3\") " pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.374132 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7npr\" (UniqueName: \"kubernetes.io/projected/3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3-kube-api-access-t7npr\") pod \"nova-cell0-conductor-0\" (UID: \"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3\") " pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.375020 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3\") " pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 
14:05:53.476883 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3\") " pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.476953 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3\") " pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.477045 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7npr\" (UniqueName: \"kubernetes.io/projected/3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3-kube-api-access-t7npr\") pod \"nova-cell0-conductor-0\" (UID: \"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3\") " pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.486247 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3\") " pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.487728 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3\") " pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.498502 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7npr\" (UniqueName: \"kubernetes.io/projected/3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3-kube-api-access-t7npr\") pod \"nova-cell0-conductor-0\" (UID: \"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3\") " pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:53 crc kubenswrapper[5133]: I1121 14:05:53.535217 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:54 crc kubenswrapper[5133]: I1121 14:05:54.018614 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 21 14:05:54 crc kubenswrapper[5133]: W1121 14:05:54.024359 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3b8a3cc3_d221_41ed_97ec_1fb8c4bf62c3.slice/crio-f08534d187457a7fe160bcdc5200c2798ac61493b3b4787026f12616722353d7 WatchSource:0}: Error finding container f08534d187457a7fe160bcdc5200c2798ac61493b3b4787026f12616722353d7: Status 404 returned error can't find the container with id f08534d187457a7fe160bcdc5200c2798ac61493b3b4787026f12616722353d7 Nov 21 14:05:54 crc kubenswrapper[5133]: I1121 14:05:54.119459 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3","Type":"ContainerStarted","Data":"f08534d187457a7fe160bcdc5200c2798ac61493b3b4787026f12616722353d7"} Nov 21 14:05:55 crc kubenswrapper[5133]: I1121 14:05:55.129700 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3","Type":"ContainerStarted","Data":"d2a15f6331d94b597fa3029a568a37bfcbdd856ed6431450efb8643f060bfa3d"} Nov 21 14:05:55 crc kubenswrapper[5133]: I1121 14:05:55.131277 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 21 14:05:55 crc kubenswrapper[5133]: I1121 14:05:55.157007 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.15697262 podStartE2EDuration="2.15697262s" podCreationTimestamp="2025-11-21 14:05:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:05:55.150328943 +0000 UTC m=+1414.948161191" watchObservedRunningTime="2025-11-21 14:05:55.15697262 +0000 UTC m=+1414.954804868" Nov 21 14:06:03 crc kubenswrapper[5133]: I1121 14:06:03.561141 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.071290 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-pk7zb"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.073152 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.079884 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.080385 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.087986 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-scripts\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.088089 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.088310 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ngwt\" (UniqueName: \"kubernetes.io/projected/18aab30d-6eea-41e2-966c-371f6e0a2e76-kube-api-access-6ngwt\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.088726 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-config-data\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.097054 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-pk7zb"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.193878 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-scripts\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.194081 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.194215 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ngwt\" (UniqueName: \"kubernetes.io/projected/18aab30d-6eea-41e2-966c-371f6e0a2e76-kube-api-access-6ngwt\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.194352 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-config-data\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.201986 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-scripts\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.213759 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-config-data\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.214461 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.215291 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ngwt\" (UniqueName: \"kubernetes.io/projected/18aab30d-6eea-41e2-966c-371f6e0a2e76-kube-api-access-6ngwt\") pod \"nova-cell0-cell-mapping-pk7zb\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.267431 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.268878 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.271530 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.282817 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.297165 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ed3ebfb-e282-425f-b086-f90ed8782d39-logs\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.297325 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.297412 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-config-data\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.297472 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-454cg\" (UniqueName: \"kubernetes.io/projected/5ed3ebfb-e282-425f-b086-f90ed8782d39-kube-api-access-454cg\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.350036 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.351960 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.355167 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.369707 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.399135 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485cb8ff-9a40-4673-82df-ac656fcb037a-logs\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.399201 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-config-data\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.399233 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-454cg\" (UniqueName: \"kubernetes.io/projected/5ed3ebfb-e282-425f-b086-f90ed8782d39-kube-api-access-454cg\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.399270 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7fnq\" (UniqueName: \"kubernetes.io/projected/485cb8ff-9a40-4673-82df-ac656fcb037a-kube-api-access-q7fnq\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.399292 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.399307 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-config-data\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.399329 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ed3ebfb-e282-425f-b086-f90ed8782d39-logs\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.399392 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.402954 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ed3ebfb-e282-425f-b086-f90ed8782d39-logs\") pod \"nova-api-0\" (UID: 
\"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.404703 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.412156 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-config-data\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.425374 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.446131 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-454cg\" (UniqueName: \"kubernetes.io/projected/5ed3ebfb-e282-425f-b086-f90ed8782d39-kube-api-access-454cg\") pod \"nova-api-0\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.487287 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-9zpfq"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.489014 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.500886 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-9zpfq"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.502178 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-nb\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.502326 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7fnq\" (UniqueName: \"kubernetes.io/projected/485cb8ff-9a40-4673-82df-ac656fcb037a-kube-api-access-q7fnq\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.502367 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.502396 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-config-data\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.502424 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-dns-svc\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.502580 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xh59\" (UniqueName: \"kubernetes.io/projected/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-kube-api-access-8xh59\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.502631 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-config\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.502683 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-sb\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.502705 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485cb8ff-9a40-4673-82df-ac656fcb037a-logs\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.503234 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485cb8ff-9a40-4673-82df-ac656fcb037a-logs\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.537937 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.547104 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.548464 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.548861 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7fnq\" (UniqueName: \"kubernetes.io/projected/485cb8ff-9a40-4673-82df-ac656fcb037a-kube-api-access-q7fnq\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.551609 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-config-data\") pod \"nova-metadata-0\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.561211 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.576307 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.602800 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.603969 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.603975 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-sb\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.604038 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-nb\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.604876 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-dns-svc\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.605022 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.605048 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-config-data\") pod \"nova-scheduler-0\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.605109 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7kpj\" (UniqueName: 
\"kubernetes.io/projected/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-kube-api-access-c7kpj\") pod \"nova-scheduler-0\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.605139 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xh59\" (UniqueName: \"kubernetes.io/projected/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-kube-api-access-8xh59\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.605156 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-config\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.605274 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-sb\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.605367 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-nb\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.605831 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-dns-svc\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.606013 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-config\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.609826 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.612040 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.620178 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.633509 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xh59\" (UniqueName: \"kubernetes.io/projected/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-kube-api-access-8xh59\") pod \"dnsmasq-dns-8b8cf6657-9zpfq\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.669694 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.720243 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnczp\" (UniqueName: \"kubernetes.io/projected/614d147a-e367-4799-aa80-9704f71aa40c-kube-api-access-mnczp\") pod \"nova-cell1-novncproxy-0\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.720313 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7kpj\" (UniqueName: \"kubernetes.io/projected/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-kube-api-access-c7kpj\") pod \"nova-scheduler-0\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.720367 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.720501 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.720528 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-config-data\") pod \"nova-scheduler-0\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.720555 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.724680 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-config-data\") pod \"nova-scheduler-0\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.727396 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.745103 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7kpj\" (UniqueName: \"kubernetes.io/projected/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-kube-api-access-c7kpj\") pod \"nova-scheduler-0\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.823040 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.823104 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnczp\" (UniqueName: \"kubernetes.io/projected/614d147a-e367-4799-aa80-9704f71aa40c-kube-api-access-mnczp\") pod \"nova-cell1-novncproxy-0\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.823151 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.830132 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.832806 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.848030 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnczp\" (UniqueName: \"kubernetes.io/projected/614d147a-e367-4799-aa80-9704f71aa40c-kube-api-access-mnczp\") pod \"nova-cell1-novncproxy-0\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.862094 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.908775 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:06:04 crc kubenswrapper[5133]: I1121 14:06:04.942163 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:05 crc kubenswrapper[5133]: W1121 14:06:05.076851 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18aab30d_6eea_41e2_966c_371f6e0a2e76.slice/crio-83ca9432bb3afde3049dab43626f8631acc1b56cef30f0f3c037a9a93695848c WatchSource:0}: Error finding container 83ca9432bb3afde3049dab43626f8631acc1b56cef30f0f3c037a9a93695848c: Status 404 returned error can't find the container with id 83ca9432bb3afde3049dab43626f8631acc1b56cef30f0f3c037a9a93695848c Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.077662 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-pk7zb"] Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.226141 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.252162 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.261775 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.265447 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-pk7zb" event={"ID":"18aab30d-6eea-41e2-966c-371f6e0a2e76","Type":"ContainerStarted","Data":"83ca9432bb3afde3049dab43626f8631acc1b56cef30f0f3c037a9a93695848c"} Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.273657 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485cb8ff-9a40-4673-82df-ac656fcb037a","Type":"ContainerStarted","Data":"43db2fc9669b0e7e134691b6dbaaa72adf52468d7af44d3c19b78b12484ca309"} Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.275686 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5ed3ebfb-e282-425f-b086-f90ed8782d39","Type":"ContainerStarted","Data":"fc2e4a7ab48cf945ddb5677dd5ef890d706cf21143416c556db7084f4b3815da"} Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.278432 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-bf6r4"] Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.281189 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.285174 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.285385 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.304184 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-bf6r4"] Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.332741 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-config-data\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.332853 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.333293 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4bfm\" (UniqueName: \"kubernetes.io/projected/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-kube-api-access-q4bfm\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.334603 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-scripts\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.344826 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.400376 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-9zpfq"] Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.436111 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-config-data\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.437030 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.437084 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4bfm\" 
(UniqueName: \"kubernetes.io/projected/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-kube-api-access-q4bfm\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.437170 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-scripts\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.445916 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-scripts\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.447713 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-config-data\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.448518 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.452564 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4bfm\" (UniqueName: \"kubernetes.io/projected/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-kube-api-access-q4bfm\") pod \"nova-cell1-conductor-db-sync-bf6r4\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.500612 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.569744 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 14:06:05 crc kubenswrapper[5133]: I1121 14:06:05.947470 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-bf6r4"] Nov 21 14:06:05 crc kubenswrapper[5133]: W1121 14:06:05.967662 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32a69b88_3e19_4aca_ad8e_0a5b35c136cf.slice/crio-6bac5fa3e90e9af3beb2e8f2ad2205a1c78a237aafe6091b2b5389f64b46ce11 WatchSource:0}: Error finding container 6bac5fa3e90e9af3beb2e8f2ad2205a1c78a237aafe6091b2b5389f64b46ce11: Status 404 returned error can't find the container with id 6bac5fa3e90e9af3beb2e8f2ad2205a1c78a237aafe6091b2b5389f64b46ce11 Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.296228 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"614d147a-e367-4799-aa80-9704f71aa40c","Type":"ContainerStarted","Data":"8e3e99709e5e09c3d5986b3b1028951d8c0950df204a4973fc8f1b3cac84e6c6"} Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.302974 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-pk7zb" event={"ID":"18aab30d-6eea-41e2-966c-371f6e0a2e76","Type":"ContainerStarted","Data":"6027cdfe5863f852ffa876e53682937454491f8d55703dfcdd48068add898d82"} Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.316540 5133 generic.go:334] "Generic (PLEG): container finished" podID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" containerID="ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5" exitCode=0 Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.316663 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" event={"ID":"d4eed958-bbdc-4a15-a42a-dfcfadd80a76","Type":"ContainerDied","Data":"ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5"} Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.316701 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" event={"ID":"d4eed958-bbdc-4a15-a42a-dfcfadd80a76","Type":"ContainerStarted","Data":"ffad53d872dd2837941b263dca7f4a9a1e8538a87ce006b604fb97b0fbc35e0f"} Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.337737 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-bf6r4" event={"ID":"32a69b88-3e19-4aca-ad8e-0a5b35c136cf","Type":"ContainerStarted","Data":"0f599c0d88facb1c35cf520e99b361a218cd88f867257177221f74b93266befd"} Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.337795 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-bf6r4" event={"ID":"32a69b88-3e19-4aca-ad8e-0a5b35c136cf","Type":"ContainerStarted","Data":"6bac5fa3e90e9af3beb2e8f2ad2205a1c78a237aafe6091b2b5389f64b46ce11"} Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.339073 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-pk7zb" podStartSLOduration=2.339051422 podStartE2EDuration="2.339051422s" podCreationTimestamp="2025-11-21 14:06:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:06.328361127 
+0000 UTC m=+1426.126193375" watchObservedRunningTime="2025-11-21 14:06:06.339051422 +0000 UTC m=+1426.136883670" Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.340959 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8","Type":"ContainerStarted","Data":"be71da5dbc91b97af93bd6df6f57cd34cab4d7448c0e2cd3bf5bfd06fde69ab7"} Nov 21 14:06:06 crc kubenswrapper[5133]: I1121 14:06:06.412438 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-bf6r4" podStartSLOduration=1.412419202 podStartE2EDuration="1.412419202s" podCreationTimestamp="2025-11-21 14:06:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:06.409318369 +0000 UTC m=+1426.207150617" watchObservedRunningTime="2025-11-21 14:06:06.412419202 +0000 UTC m=+1426.210251450" Nov 21 14:06:07 crc kubenswrapper[5133]: I1121 14:06:07.362223 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" event={"ID":"d4eed958-bbdc-4a15-a42a-dfcfadd80a76","Type":"ContainerStarted","Data":"ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70"} Nov 21 14:06:07 crc kubenswrapper[5133]: I1121 14:06:07.362929 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:08 crc kubenswrapper[5133]: I1121 14:06:08.231241 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" podStartSLOduration=4.231222983 podStartE2EDuration="4.231222983s" podCreationTimestamp="2025-11-21 14:06:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:07.401734021 +0000 UTC m=+1427.199566269" watchObservedRunningTime="2025-11-21 14:06:08.231222983 +0000 UTC m=+1428.029055231" Nov 21 14:06:08 crc kubenswrapper[5133]: I1121 14:06:08.235577 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 14:06:08 crc kubenswrapper[5133]: I1121 14:06:08.251991 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.404884 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"614d147a-e367-4799-aa80-9704f71aa40c","Type":"ContainerStarted","Data":"b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764"} Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.404975 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="614d147a-e367-4799-aa80-9704f71aa40c" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764" gracePeriod=30 Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.407202 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485cb8ff-9a40-4673-82df-ac656fcb037a","Type":"ContainerStarted","Data":"88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4"} Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.407235 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"485cb8ff-9a40-4673-82df-ac656fcb037a","Type":"ContainerStarted","Data":"e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2"} Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.407397 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerName="nova-metadata-log" containerID="cri-o://e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2" gracePeriod=30 Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.407518 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerName="nova-metadata-metadata" containerID="cri-o://88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4" gracePeriod=30 Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.412387 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5ed3ebfb-e282-425f-b086-f90ed8782d39","Type":"ContainerStarted","Data":"219eae399ca3e89586a3268c46d406cf6893ca3257f669a4e8d3da5456cf9a6c"} Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.412432 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5ed3ebfb-e282-425f-b086-f90ed8782d39","Type":"ContainerStarted","Data":"502e1733fbfa31392cd21daf95af8efcdb946ce979257b4b5bd795bf39e718d1"} Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.417354 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8","Type":"ContainerStarted","Data":"e301a655d4c2dfa4770a6ec603f88db379353fde6256c0b5d2cf428fc090a259"} Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.428956 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.970753782 podStartE2EDuration="6.428937291s" podCreationTimestamp="2025-11-21 14:06:04 +0000 UTC" firstStartedPulling="2025-11-21 14:06:05.599319328 +0000 UTC m=+1425.397151576" lastFinishedPulling="2025-11-21 14:06:09.057502837 +0000 UTC m=+1428.855335085" observedRunningTime="2025-11-21 14:06:10.424836472 +0000 UTC m=+1430.222668730" watchObservedRunningTime="2025-11-21 14:06:10.428937291 +0000 UTC m=+1430.226769539" Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.452528 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.770201806 podStartE2EDuration="6.45250177s" podCreationTimestamp="2025-11-21 14:06:04 +0000 UTC" firstStartedPulling="2025-11-21 14:06:05.369570573 +0000 UTC m=+1425.167402821" lastFinishedPulling="2025-11-21 14:06:09.051870537 +0000 UTC m=+1428.849702785" observedRunningTime="2025-11-21 14:06:10.444590959 +0000 UTC m=+1430.242423207" watchObservedRunningTime="2025-11-21 14:06:10.45250177 +0000 UTC m=+1430.250334018" Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.469918 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.645333602 podStartE2EDuration="6.469898555s" podCreationTimestamp="2025-11-21 14:06:04 +0000 UTC" firstStartedPulling="2025-11-21 14:06:05.225969208 +0000 UTC m=+1425.023801456" lastFinishedPulling="2025-11-21 14:06:09.050534161 +0000 UTC m=+1428.848366409" observedRunningTime="2025-11-21 14:06:10.462456556 +0000 UTC m=+1430.260288804" 
watchObservedRunningTime="2025-11-21 14:06:10.469898555 +0000 UTC m=+1430.267730803" Nov 21 14:06:10 crc kubenswrapper[5133]: I1121 14:06:10.524786 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.698782119 podStartE2EDuration="6.52474896s" podCreationTimestamp="2025-11-21 14:06:04 +0000 UTC" firstStartedPulling="2025-11-21 14:06:05.225937017 +0000 UTC m=+1425.023769265" lastFinishedPulling="2025-11-21 14:06:09.051903858 +0000 UTC m=+1428.849736106" observedRunningTime="2025-11-21 14:06:10.499983148 +0000 UTC m=+1430.297815416" watchObservedRunningTime="2025-11-21 14:06:10.52474896 +0000 UTC m=+1430.322581208" Nov 21 14:06:11 crc kubenswrapper[5133]: I1121 14:06:11.428931 5133 generic.go:334] "Generic (PLEG): container finished" podID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerID="e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2" exitCode=143 Nov 21 14:06:11 crc kubenswrapper[5133]: I1121 14:06:11.429782 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485cb8ff-9a40-4673-82df-ac656fcb037a","Type":"ContainerDied","Data":"e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2"} Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.135703 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.210750 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-combined-ca-bundle\") pod \"485cb8ff-9a40-4673-82df-ac656fcb037a\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.210868 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-config-data\") pod \"485cb8ff-9a40-4673-82df-ac656fcb037a\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.211671 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7fnq\" (UniqueName: \"kubernetes.io/projected/485cb8ff-9a40-4673-82df-ac656fcb037a-kube-api-access-q7fnq\") pod \"485cb8ff-9a40-4673-82df-ac656fcb037a\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.211712 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485cb8ff-9a40-4673-82df-ac656fcb037a-logs\") pod \"485cb8ff-9a40-4673-82df-ac656fcb037a\" (UID: \"485cb8ff-9a40-4673-82df-ac656fcb037a\") " Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.212266 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/485cb8ff-9a40-4673-82df-ac656fcb037a-logs" (OuterVolumeSpecName: "logs") pod "485cb8ff-9a40-4673-82df-ac656fcb037a" (UID: "485cb8ff-9a40-4673-82df-ac656fcb037a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.216637 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/485cb8ff-9a40-4673-82df-ac656fcb037a-kube-api-access-q7fnq" (OuterVolumeSpecName: "kube-api-access-q7fnq") pod "485cb8ff-9a40-4673-82df-ac656fcb037a" (UID: "485cb8ff-9a40-4673-82df-ac656fcb037a"). InnerVolumeSpecName "kube-api-access-q7fnq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.235944 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-config-data" (OuterVolumeSpecName: "config-data") pod "485cb8ff-9a40-4673-82df-ac656fcb037a" (UID: "485cb8ff-9a40-4673-82df-ac656fcb037a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.240128 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "485cb8ff-9a40-4673-82df-ac656fcb037a" (UID: "485cb8ff-9a40-4673-82df-ac656fcb037a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.313796 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7fnq\" (UniqueName: \"kubernetes.io/projected/485cb8ff-9a40-4673-82df-ac656fcb037a-kube-api-access-q7fnq\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.313827 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/485cb8ff-9a40-4673-82df-ac656fcb037a-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.313837 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.313847 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/485cb8ff-9a40-4673-82df-ac656fcb037a-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.443960 5133 generic.go:334] "Generic (PLEG): container finished" podID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerID="88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4" exitCode=0 Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.444083 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485cb8ff-9a40-4673-82df-ac656fcb037a","Type":"ContainerDied","Data":"88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4"} Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.444144 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"485cb8ff-9a40-4673-82df-ac656fcb037a","Type":"ContainerDied","Data":"43db2fc9669b0e7e134691b6dbaaa72adf52468d7af44d3c19b78b12484ca309"} Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.444186 5133 scope.go:117] "RemoveContainer" containerID="88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.444469 
5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.510371 5133 scope.go:117] "RemoveContainer" containerID="e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.526274 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.543966 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.551788 5133 scope.go:117] "RemoveContainer" containerID="88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4" Nov 21 14:06:12 crc kubenswrapper[5133]: E1121 14:06:12.552429 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4\": container with ID starting with 88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4 not found: ID does not exist" containerID="88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.552468 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4"} err="failed to get container status \"88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4\": rpc error: code = NotFound desc = could not find container \"88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4\": container with ID starting with 88061c38c1093a3fcc1bb533fe459144724ec38aec52e008c69bc8cf5f663cd4 not found: ID does not exist" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.552493 5133 scope.go:117] "RemoveContainer" containerID="e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2" Nov 21 14:06:12 crc kubenswrapper[5133]: E1121 14:06:12.552808 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2\": container with ID starting with e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2 not found: ID does not exist" containerID="e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.552861 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2"} err="failed to get container status \"e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2\": rpc error: code = NotFound desc = could not find container \"e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2\": container with ID starting with e237530ce3357d94264249d7aecc42b5073deaae82822982eaf855f78b977de2 not found: ID does not exist" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.558884 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:12 crc kubenswrapper[5133]: E1121 14:06:12.559483 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerName="nova-metadata-metadata" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.559505 5133 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerName="nova-metadata-metadata" Nov 21 14:06:12 crc kubenswrapper[5133]: E1121 14:06:12.559552 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerName="nova-metadata-log" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.559559 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerName="nova-metadata-log" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.559766 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerName="nova-metadata-metadata" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.559785 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="485cb8ff-9a40-4673-82df-ac656fcb037a" containerName="nova-metadata-log" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.560972 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.570131 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.577081 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.577174 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.619843 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-config-data\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.620103 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.620147 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.620233 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qt56\" (UniqueName: \"kubernetes.io/projected/1f1c02de-a2c5-4930-b61e-435635bc7079-kube-api-access-5qt56\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.620265 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f1c02de-a2c5-4930-b61e-435635bc7079-logs\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.722233 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.722286 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.722355 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qt56\" (UniqueName: \"kubernetes.io/projected/1f1c02de-a2c5-4930-b61e-435635bc7079-kube-api-access-5qt56\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.722375 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f1c02de-a2c5-4930-b61e-435635bc7079-logs\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.722408 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-config-data\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.723066 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f1c02de-a2c5-4930-b61e-435635bc7079-logs\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.727420 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.727818 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-config-data\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.730155 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc kubenswrapper[5133]: I1121 14:06:12.751364 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qt56\" (UniqueName: \"kubernetes.io/projected/1f1c02de-a2c5-4930-b61e-435635bc7079-kube-api-access-5qt56\") pod \"nova-metadata-0\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " pod="openstack/nova-metadata-0" Nov 21 14:06:12 crc 
kubenswrapper[5133]: I1121 14:06:12.893978 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:13 crc kubenswrapper[5133]: I1121 14:06:13.436712 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:13 crc kubenswrapper[5133]: I1121 14:06:13.465430 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f1c02de-a2c5-4930-b61e-435635bc7079","Type":"ContainerStarted","Data":"38ea8a746c4cbe660af9aec15a4b5363ae3e7bbb1bf77aaf375580e43125c31a"} Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.468454 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="485cb8ff-9a40-4673-82df-ac656fcb037a" path="/var/lib/kubelet/pods/485cb8ff-9a40-4673-82df-ac656fcb037a/volumes" Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.475482 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f1c02de-a2c5-4930-b61e-435635bc7079","Type":"ContainerStarted","Data":"91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348"} Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.475525 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f1c02de-a2c5-4930-b61e-435635bc7079","Type":"ContainerStarted","Data":"b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612"} Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.513444 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.513415165 podStartE2EDuration="2.513415165s" podCreationTimestamp="2025-11-21 14:06:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:14.494679425 +0000 UTC m=+1434.292511723" watchObservedRunningTime="2025-11-21 14:06:14.513415165 +0000 UTC m=+1434.311247423" Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.611077 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.611503 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.863325 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.909687 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.909755 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.940224 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-m2zdv"] Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.940536 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" podUID="169191a4-579a-4818-be80-bd64998e72a1" containerName="dnsmasq-dns" containerID="cri-o://db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb" gracePeriod=10 Nov 21 14:06:14 crc kubenswrapper[5133]: I1121 14:06:14.943124 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:14.997557 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.476657 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.506169 5133 generic.go:334] "Generic (PLEG): container finished" podID="169191a4-579a-4818-be80-bd64998e72a1" containerID="db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb" exitCode=0 Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.506262 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" event={"ID":"169191a4-579a-4818-be80-bd64998e72a1","Type":"ContainerDied","Data":"db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb"} Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.506301 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" event={"ID":"169191a4-579a-4818-be80-bd64998e72a1","Type":"ContainerDied","Data":"af03e8d8f98f99e83743c383897c935ffc12e8534fb16042dcd560f79bcf32ab"} Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.506322 5133 scope.go:117] "RemoveContainer" containerID="db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.506484 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58db5546cc-m2zdv" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.515206 5133 generic.go:334] "Generic (PLEG): container finished" podID="18aab30d-6eea-41e2-966c-371f6e0a2e76" containerID="6027cdfe5863f852ffa876e53682937454491f8d55703dfcdd48068add898d82" exitCode=0 Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.515324 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-pk7zb" event={"ID":"18aab30d-6eea-41e2-966c-371f6e0a2e76","Type":"ContainerDied","Data":"6027cdfe5863f852ffa876e53682937454491f8d55703dfcdd48068add898d82"} Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.566183 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.576930 5133 scope.go:117] "RemoveContainer" containerID="200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.605220 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-sb\") pod \"169191a4-579a-4818-be80-bd64998e72a1\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.605288 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-nb\") pod \"169191a4-579a-4818-be80-bd64998e72a1\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.605380 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-dns-svc\") pod 
\"169191a4-579a-4818-be80-bd64998e72a1\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.605466 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-config\") pod \"169191a4-579a-4818-be80-bd64998e72a1\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.605501 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8w62c\" (UniqueName: \"kubernetes.io/projected/169191a4-579a-4818-be80-bd64998e72a1-kube-api-access-8w62c\") pod \"169191a4-579a-4818-be80-bd64998e72a1\" (UID: \"169191a4-579a-4818-be80-bd64998e72a1\") " Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.610561 5133 scope.go:117] "RemoveContainer" containerID="db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.610997 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.165:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.611169 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.165:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 14:06:15 crc kubenswrapper[5133]: E1121 14:06:15.611770 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb\": container with ID starting with db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb not found: ID does not exist" containerID="db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.611798 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb"} err="failed to get container status \"db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb\": rpc error: code = NotFound desc = could not find container \"db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb\": container with ID starting with db8ea5ebfe98df32bba95baf1d4f81c88c690b023887dd2e2f44297073139ebb not found: ID does not exist" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.611818 5133 scope.go:117] "RemoveContainer" containerID="200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.613876 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/169191a4-579a-4818-be80-bd64998e72a1-kube-api-access-8w62c" (OuterVolumeSpecName: "kube-api-access-8w62c") pod "169191a4-579a-4818-be80-bd64998e72a1" (UID: "169191a4-579a-4818-be80-bd64998e72a1"). InnerVolumeSpecName "kube-api-access-8w62c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:15 crc kubenswrapper[5133]: E1121 14:06:15.615309 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f\": container with ID starting with 200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f not found: ID does not exist" containerID="200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.615335 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f"} err="failed to get container status \"200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f\": rpc error: code = NotFound desc = could not find container \"200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f\": container with ID starting with 200a2a8233cefd447a4af9b72a9f5316962f04c159599d478a43649dadf8a74f not found: ID does not exist" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.663928 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "169191a4-579a-4818-be80-bd64998e72a1" (UID: "169191a4-579a-4818-be80-bd64998e72a1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.664088 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-config" (OuterVolumeSpecName: "config") pod "169191a4-579a-4818-be80-bd64998e72a1" (UID: "169191a4-579a-4818-be80-bd64998e72a1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.681771 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "169191a4-579a-4818-be80-bd64998e72a1" (UID: "169191a4-579a-4818-be80-bd64998e72a1"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.710087 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.710133 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.710143 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8w62c\" (UniqueName: \"kubernetes.io/projected/169191a4-579a-4818-be80-bd64998e72a1-kube-api-access-8w62c\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.710156 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.751507 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "169191a4-579a-4818-be80-bd64998e72a1" (UID: "169191a4-579a-4818-be80-bd64998e72a1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.812863 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/169191a4-579a-4818-be80-bd64998e72a1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.854516 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-m2zdv"] Nov 21 14:06:15 crc kubenswrapper[5133]: I1121 14:06:15.866098 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58db5546cc-m2zdv"] Nov 21 14:06:16 crc kubenswrapper[5133]: I1121 14:06:16.420616 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 21 14:06:16 crc kubenswrapper[5133]: I1121 14:06:16.470538 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="169191a4-579a-4818-be80-bd64998e72a1" path="/var/lib/kubelet/pods/169191a4-579a-4818-be80-bd64998e72a1/volumes" Nov 21 14:06:16 crc kubenswrapper[5133]: I1121 14:06:16.894856 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.037347 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ngwt\" (UniqueName: \"kubernetes.io/projected/18aab30d-6eea-41e2-966c-371f6e0a2e76-kube-api-access-6ngwt\") pod \"18aab30d-6eea-41e2-966c-371f6e0a2e76\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.037524 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-combined-ca-bundle\") pod \"18aab30d-6eea-41e2-966c-371f6e0a2e76\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.037705 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-config-data\") pod \"18aab30d-6eea-41e2-966c-371f6e0a2e76\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.037767 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-scripts\") pod \"18aab30d-6eea-41e2-966c-371f6e0a2e76\" (UID: \"18aab30d-6eea-41e2-966c-371f6e0a2e76\") " Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.046233 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-scripts" (OuterVolumeSpecName: "scripts") pod "18aab30d-6eea-41e2-966c-371f6e0a2e76" (UID: "18aab30d-6eea-41e2-966c-371f6e0a2e76"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.046290 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18aab30d-6eea-41e2-966c-371f6e0a2e76-kube-api-access-6ngwt" (OuterVolumeSpecName: "kube-api-access-6ngwt") pod "18aab30d-6eea-41e2-966c-371f6e0a2e76" (UID: "18aab30d-6eea-41e2-966c-371f6e0a2e76"). InnerVolumeSpecName "kube-api-access-6ngwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.070167 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-config-data" (OuterVolumeSpecName: "config-data") pod "18aab30d-6eea-41e2-966c-371f6e0a2e76" (UID: "18aab30d-6eea-41e2-966c-371f6e0a2e76"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.078337 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18aab30d-6eea-41e2-966c-371f6e0a2e76" (UID: "18aab30d-6eea-41e2-966c-371f6e0a2e76"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.140118 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.140187 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.140205 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18aab30d-6eea-41e2-966c-371f6e0a2e76-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.140223 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ngwt\" (UniqueName: \"kubernetes.io/projected/18aab30d-6eea-41e2-966c-371f6e0a2e76-kube-api-access-6ngwt\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.536256 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-pk7zb" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.536262 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-pk7zb" event={"ID":"18aab30d-6eea-41e2-966c-371f6e0a2e76","Type":"ContainerDied","Data":"83ca9432bb3afde3049dab43626f8631acc1b56cef30f0f3c037a9a93695848c"} Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.536644 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83ca9432bb3afde3049dab43626f8631acc1b56cef30f0f3c037a9a93695848c" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.537785 5133 generic.go:334] "Generic (PLEG): container finished" podID="32a69b88-3e19-4aca-ad8e-0a5b35c136cf" containerID="0f599c0d88facb1c35cf520e99b361a218cd88f867257177221f74b93266befd" exitCode=0 Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.537846 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-bf6r4" event={"ID":"32a69b88-3e19-4aca-ad8e-0a5b35c136cf","Type":"ContainerDied","Data":"0f599c0d88facb1c35cf520e99b361a218cd88f867257177221f74b93266befd"} Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.737296 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.737678 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" containerName="nova-scheduler-scheduler" containerID="cri-o://e301a655d4c2dfa4770a6ec603f88db379353fde6256c0b5d2cf428fc090a259" gracePeriod=30 Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.756337 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.756639 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-log" containerID="cri-o://502e1733fbfa31392cd21daf95af8efcdb946ce979257b4b5bd795bf39e718d1" gracePeriod=30 Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.757224 5133 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/nova-api-0" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-api" containerID="cri-o://219eae399ca3e89586a3268c46d406cf6893ca3257f669a4e8d3da5456cf9a6c" gracePeriod=30 Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.779346 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.780648 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerName="nova-metadata-log" containerID="cri-o://b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612" gracePeriod=30 Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.780817 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerName="nova-metadata-metadata" containerID="cri-o://91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348" gracePeriod=30 Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.895033 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 14:06:17 crc kubenswrapper[5133]: I1121 14:06:17.895100 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.381778 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.465484 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-combined-ca-bundle\") pod \"1f1c02de-a2c5-4930-b61e-435635bc7079\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.465587 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qt56\" (UniqueName: \"kubernetes.io/projected/1f1c02de-a2c5-4930-b61e-435635bc7079-kube-api-access-5qt56\") pod \"1f1c02de-a2c5-4930-b61e-435635bc7079\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.465643 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-config-data\") pod \"1f1c02de-a2c5-4930-b61e-435635bc7079\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.465983 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f1c02de-a2c5-4930-b61e-435635bc7079-logs\") pod \"1f1c02de-a2c5-4930-b61e-435635bc7079\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.466027 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-nova-metadata-tls-certs\") pod \"1f1c02de-a2c5-4930-b61e-435635bc7079\" (UID: \"1f1c02de-a2c5-4930-b61e-435635bc7079\") " Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.466756 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f1c02de-a2c5-4930-b61e-435635bc7079-logs" 
(OuterVolumeSpecName: "logs") pod "1f1c02de-a2c5-4930-b61e-435635bc7079" (UID: "1f1c02de-a2c5-4930-b61e-435635bc7079"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.467209 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f1c02de-a2c5-4930-b61e-435635bc7079-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.476265 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f1c02de-a2c5-4930-b61e-435635bc7079-kube-api-access-5qt56" (OuterVolumeSpecName: "kube-api-access-5qt56") pod "1f1c02de-a2c5-4930-b61e-435635bc7079" (UID: "1f1c02de-a2c5-4930-b61e-435635bc7079"). InnerVolumeSpecName "kube-api-access-5qt56". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.524174 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "1f1c02de-a2c5-4930-b61e-435635bc7079" (UID: "1f1c02de-a2c5-4930-b61e-435635bc7079"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.525602 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1f1c02de-a2c5-4930-b61e-435635bc7079" (UID: "1f1c02de-a2c5-4930-b61e-435635bc7079"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.526201 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-config-data" (OuterVolumeSpecName: "config-data") pod "1f1c02de-a2c5-4930-b61e-435635bc7079" (UID: "1f1c02de-a2c5-4930-b61e-435635bc7079"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.569116 5133 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.569163 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.569180 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qt56\" (UniqueName: \"kubernetes.io/projected/1f1c02de-a2c5-4930-b61e-435635bc7079-kube-api-access-5qt56\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.569190 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f1c02de-a2c5-4930-b61e-435635bc7079-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.569905 5133 generic.go:334] "Generic (PLEG): container finished" podID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerID="502e1733fbfa31392cd21daf95af8efcdb946ce979257b4b5bd795bf39e718d1" exitCode=143 Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.584379 5133 generic.go:334] "Generic (PLEG): container finished" podID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerID="91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348" exitCode=0 Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.584407 5133 generic.go:334] "Generic (PLEG): container finished" podID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerID="b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612" exitCode=143 Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.584648 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.591061 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5ed3ebfb-e282-425f-b086-f90ed8782d39","Type":"ContainerDied","Data":"502e1733fbfa31392cd21daf95af8efcdb946ce979257b4b5bd795bf39e718d1"} Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.591123 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f1c02de-a2c5-4930-b61e-435635bc7079","Type":"ContainerDied","Data":"91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348"} Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.591146 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f1c02de-a2c5-4930-b61e-435635bc7079","Type":"ContainerDied","Data":"b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612"} Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.591159 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1f1c02de-a2c5-4930-b61e-435635bc7079","Type":"ContainerDied","Data":"38ea8a746c4cbe660af9aec15a4b5363ae3e7bbb1bf77aaf375580e43125c31a"} Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.591184 5133 scope.go:117] "RemoveContainer" containerID="91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.630392 5133 scope.go:117] "RemoveContainer" containerID="b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.640083 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.662819 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.673798 5133 scope.go:117] "RemoveContainer" containerID="91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348" Nov 21 14:06:18 crc kubenswrapper[5133]: E1121 14:06:18.674663 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348\": container with ID starting with 91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348 not found: ID does not exist" containerID="91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.674687 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348"} err="failed to get container status \"91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348\": rpc error: code = NotFound desc = could not find container \"91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348\": container with ID starting with 91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348 not found: ID does not exist" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.674709 5133 scope.go:117] "RemoveContainer" containerID="b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612" Nov 21 14:06:18 crc kubenswrapper[5133]: E1121 14:06:18.675096 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612\": container with ID starting with b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612 not found: ID does not exist" containerID="b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675136 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612"} err="failed to get container status \"b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612\": rpc error: code = NotFound desc = could not find container \"b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612\": container with ID starting with b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612 not found: ID does not exist" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675164 5133 scope.go:117] "RemoveContainer" containerID="91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675154 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675592 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348"} err="failed to get container status \"91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348\": rpc error: code = NotFound desc = could not find container \"91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348\": container with ID starting with 91ff910985bd6bc441d1d4a93787eb3a317cfe6536dc9bce28caf1f9b8a0e348 not found: ID does not exist" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675611 5133 scope.go:117] "RemoveContainer" containerID="b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612" Nov 21 14:06:18 crc kubenswrapper[5133]: E1121 14:06:18.675763 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18aab30d-6eea-41e2-966c-371f6e0a2e76" containerName="nova-manage" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675781 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="18aab30d-6eea-41e2-966c-371f6e0a2e76" containerName="nova-manage" Nov 21 14:06:18 crc kubenswrapper[5133]: E1121 14:06:18.675798 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerName="nova-metadata-log" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675805 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerName="nova-metadata-log" Nov 21 14:06:18 crc kubenswrapper[5133]: E1121 14:06:18.675820 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerName="nova-metadata-metadata" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675828 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerName="nova-metadata-metadata" Nov 21 14:06:18 crc kubenswrapper[5133]: E1121 14:06:18.675845 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="169191a4-579a-4818-be80-bd64998e72a1" containerName="init" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675852 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="169191a4-579a-4818-be80-bd64998e72a1" containerName="init" Nov 21 
14:06:18 crc kubenswrapper[5133]: E1121 14:06:18.675876 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="169191a4-579a-4818-be80-bd64998e72a1" containerName="dnsmasq-dns" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675884 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="169191a4-579a-4818-be80-bd64998e72a1" containerName="dnsmasq-dns" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.675934 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612"} err="failed to get container status \"b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612\": rpc error: code = NotFound desc = could not find container \"b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612\": container with ID starting with b30edc14371a75fac9d9e54dc8fc833a5c01af0dad0c5f68dc14ddea15333612 not found: ID does not exist" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.676186 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="18aab30d-6eea-41e2-966c-371f6e0a2e76" containerName="nova-manage" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.676217 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerName="nova-metadata-log" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.676231 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="169191a4-579a-4818-be80-bd64998e72a1" containerName="dnsmasq-dns" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.676244 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f1c02de-a2c5-4930-b61e-435635bc7079" containerName="nova-metadata-metadata" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.677643 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.680703 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.680879 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.683811 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.776048 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkrbz\" (UniqueName: \"kubernetes.io/projected/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-kube-api-access-jkrbz\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.776170 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.776231 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-config-data\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.776356 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-logs\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.776467 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.877864 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkrbz\" (UniqueName: \"kubernetes.io/projected/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-kube-api-access-jkrbz\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.877931 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.877964 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-config-data\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " 
pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.878031 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-logs\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.878075 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.883456 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-logs\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.883854 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.883870 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.909960 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkrbz\" (UniqueName: \"kubernetes.io/projected/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-kube-api-access-jkrbz\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.917962 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-config-data\") pod \"nova-metadata-0\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " pod="openstack/nova-metadata-0" Nov 21 14:06:18 crc kubenswrapper[5133]: I1121 14:06:18.998069 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.027100 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.081316 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-combined-ca-bundle\") pod \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.081406 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-scripts\") pod \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.081579 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-config-data\") pod \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.081637 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4bfm\" (UniqueName: \"kubernetes.io/projected/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-kube-api-access-q4bfm\") pod \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\" (UID: \"32a69b88-3e19-4aca-ad8e-0a5b35c136cf\") " Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.100443 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-kube-api-access-q4bfm" (OuterVolumeSpecName: "kube-api-access-q4bfm") pod "32a69b88-3e19-4aca-ad8e-0a5b35c136cf" (UID: "32a69b88-3e19-4aca-ad8e-0a5b35c136cf"). InnerVolumeSpecName "kube-api-access-q4bfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.100457 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-scripts" (OuterVolumeSpecName: "scripts") pod "32a69b88-3e19-4aca-ad8e-0a5b35c136cf" (UID: "32a69b88-3e19-4aca-ad8e-0a5b35c136cf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.118923 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-config-data" (OuterVolumeSpecName: "config-data") pod "32a69b88-3e19-4aca-ad8e-0a5b35c136cf" (UID: "32a69b88-3e19-4aca-ad8e-0a5b35c136cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.121735 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32a69b88-3e19-4aca-ad8e-0a5b35c136cf" (UID: "32a69b88-3e19-4aca-ad8e-0a5b35c136cf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.184180 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.184212 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.184223 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.184232 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4bfm\" (UniqueName: \"kubernetes.io/projected/32a69b88-3e19-4aca-ad8e-0a5b35c136cf-kube-api-access-q4bfm\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.508686 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.605936 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20e07bbe-ddbc-445a-8201-2aa590f4f1a3","Type":"ContainerStarted","Data":"1a876b66b29c738220b932b5010dfae60f2dab1900a3557fc7da509f3c52c157"} Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.612054 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-bf6r4" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.611999 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-bf6r4" event={"ID":"32a69b88-3e19-4aca-ad8e-0a5b35c136cf","Type":"ContainerDied","Data":"6bac5fa3e90e9af3beb2e8f2ad2205a1c78a237aafe6091b2b5389f64b46ce11"} Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.612235 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6bac5fa3e90e9af3beb2e8f2ad2205a1c78a237aafe6091b2b5389f64b46ce11" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.641986 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 21 14:06:19 crc kubenswrapper[5133]: E1121 14:06:19.642512 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32a69b88-3e19-4aca-ad8e-0a5b35c136cf" containerName="nova-cell1-conductor-db-sync" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.642534 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="32a69b88-3e19-4aca-ad8e-0a5b35c136cf" containerName="nova-cell1-conductor-db-sync" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.642846 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="32a69b88-3e19-4aca-ad8e-0a5b35c136cf" containerName="nova-cell1-conductor-db-sync" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.643667 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.646127 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.659734 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.702969 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c061b8f-ed3c-4996-80b5-6e8cfcc18968-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3c061b8f-ed3c-4996-80b5-6e8cfcc18968\") " pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.703365 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99pcs\" (UniqueName: \"kubernetes.io/projected/3c061b8f-ed3c-4996-80b5-6e8cfcc18968-kube-api-access-99pcs\") pod \"nova-cell1-conductor-0\" (UID: \"3c061b8f-ed3c-4996-80b5-6e8cfcc18968\") " pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.703491 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c061b8f-ed3c-4996-80b5-6e8cfcc18968-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3c061b8f-ed3c-4996-80b5-6e8cfcc18968\") " pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.783850 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.784439 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="866a2674-f006-4883-9598-6902879561e6" containerName="kube-state-metrics" containerID="cri-o://80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800" gracePeriod=30 Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.805859 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99pcs\" (UniqueName: \"kubernetes.io/projected/3c061b8f-ed3c-4996-80b5-6e8cfcc18968-kube-api-access-99pcs\") pod \"nova-cell1-conductor-0\" (UID: \"3c061b8f-ed3c-4996-80b5-6e8cfcc18968\") " pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.805934 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c061b8f-ed3c-4996-80b5-6e8cfcc18968-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"3c061b8f-ed3c-4996-80b5-6e8cfcc18968\") " pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.806021 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c061b8f-ed3c-4996-80b5-6e8cfcc18968-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3c061b8f-ed3c-4996-80b5-6e8cfcc18968\") " pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.811334 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c061b8f-ed3c-4996-80b5-6e8cfcc18968-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: 
\"3c061b8f-ed3c-4996-80b5-6e8cfcc18968\") " pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.822799 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c061b8f-ed3c-4996-80b5-6e8cfcc18968-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"3c061b8f-ed3c-4996-80b5-6e8cfcc18968\") " pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: I1121 14:06:19.864141 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99pcs\" (UniqueName: \"kubernetes.io/projected/3c061b8f-ed3c-4996-80b5-6e8cfcc18968-kube-api-access-99pcs\") pod \"nova-cell1-conductor-0\" (UID: \"3c061b8f-ed3c-4996-80b5-6e8cfcc18968\") " pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:19 crc kubenswrapper[5133]: E1121 14:06:19.917135 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e301a655d4c2dfa4770a6ec603f88db379353fde6256c0b5d2cf428fc090a259" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 14:06:19 crc kubenswrapper[5133]: E1121 14:06:19.918615 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e301a655d4c2dfa4770a6ec603f88db379353fde6256c0b5d2cf428fc090a259" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 14:06:19 crc kubenswrapper[5133]: E1121 14:06:19.920039 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e301a655d4c2dfa4770a6ec603f88db379353fde6256c0b5d2cf428fc090a259" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 14:06:19 crc kubenswrapper[5133]: E1121 14:06:19.920111 5133 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" containerName="nova-scheduler-scheduler" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.002597 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.195987 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.323923 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kdxk\" (UniqueName: \"kubernetes.io/projected/866a2674-f006-4883-9598-6902879561e6-kube-api-access-2kdxk\") pod \"866a2674-f006-4883-9598-6902879561e6\" (UID: \"866a2674-f006-4883-9598-6902879561e6\") " Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.336724 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/866a2674-f006-4883-9598-6902879561e6-kube-api-access-2kdxk" (OuterVolumeSpecName: "kube-api-access-2kdxk") pod "866a2674-f006-4883-9598-6902879561e6" (UID: "866a2674-f006-4883-9598-6902879561e6"). InnerVolumeSpecName "kube-api-access-2kdxk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.426496 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.427645 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kdxk\" (UniqueName: \"kubernetes.io/projected/866a2674-f006-4883-9598-6902879561e6-kube-api-access-2kdxk\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:20 crc kubenswrapper[5133]: W1121 14:06:20.458398 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c061b8f_ed3c_4996_80b5_6e8cfcc18968.slice/crio-373985de891bdf00f38e35ca9f167ea62c361a24848ae96fd387297381511ac6 WatchSource:0}: Error finding container 373985de891bdf00f38e35ca9f167ea62c361a24848ae96fd387297381511ac6: Status 404 returned error can't find the container with id 373985de891bdf00f38e35ca9f167ea62c361a24848ae96fd387297381511ac6 Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.485988 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f1c02de-a2c5-4930-b61e-435635bc7079" path="/var/lib/kubelet/pods/1f1c02de-a2c5-4930-b61e-435635bc7079/volumes" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.624706 5133 generic.go:334] "Generic (PLEG): container finished" podID="866a2674-f006-4883-9598-6902879561e6" containerID="80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800" exitCode=2 Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.624768 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"866a2674-f006-4883-9598-6902879561e6","Type":"ContainerDied","Data":"80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800"} Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.624799 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"866a2674-f006-4883-9598-6902879561e6","Type":"ContainerDied","Data":"a6da2f6d0dae395dd621890330b00cebafe58ec3e03e3eb3d43d33d2eb1e2aaa"} Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.624818 5133 scope.go:117] "RemoveContainer" containerID="80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.624964 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.630800 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3c061b8f-ed3c-4996-80b5-6e8cfcc18968","Type":"ContainerStarted","Data":"373985de891bdf00f38e35ca9f167ea62c361a24848ae96fd387297381511ac6"} Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.637654 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20e07bbe-ddbc-445a-8201-2aa590f4f1a3","Type":"ContainerStarted","Data":"c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92"} Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.637724 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20e07bbe-ddbc-445a-8201-2aa590f4f1a3","Type":"ContainerStarted","Data":"4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560"} Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.649884 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.676391 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.682654 5133 scope.go:117] "RemoveContainer" containerID="80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800" Nov 21 14:06:20 crc kubenswrapper[5133]: E1121 14:06:20.683352 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800\": container with ID starting with 80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800 not found: ID does not exist" containerID="80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.683398 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800"} err="failed to get container status \"80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800\": rpc error: code = NotFound desc = could not find container \"80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800\": container with ID starting with 80eb98ef67b269d2835b33e186b85ab1360097b890c1e8139b27442200003800 not found: ID does not exist" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.690087 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 14:06:20 crc kubenswrapper[5133]: E1121 14:06:20.690698 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="866a2674-f006-4883-9598-6902879561e6" containerName="kube-state-metrics" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.690723 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="866a2674-f006-4883-9598-6902879561e6" containerName="kube-state-metrics" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.690921 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="866a2674-f006-4883-9598-6902879561e6" containerName="kube-state-metrics" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.692025 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.695971 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.696154 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.722287 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.722253539 podStartE2EDuration="2.722253539s" podCreationTimestamp="2025-11-21 14:06:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:20.662703589 +0000 UTC m=+1440.460535837" watchObservedRunningTime="2025-11-21 14:06:20.722253539 +0000 UTC m=+1440.520085787" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.732538 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.750441 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9tqd\" (UniqueName: \"kubernetes.io/projected/6898e19f-ae5d-4d82-ac31-d5100fb81625-kube-api-access-p9tqd\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.750754 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6898e19f-ae5d-4d82-ac31-d5100fb81625-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.750924 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/6898e19f-ae5d-4d82-ac31-d5100fb81625-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.751056 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/6898e19f-ae5d-4d82-ac31-d5100fb81625-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.853139 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9tqd\" (UniqueName: \"kubernetes.io/projected/6898e19f-ae5d-4d82-ac31-d5100fb81625-kube-api-access-p9tqd\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.853432 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6898e19f-ae5d-4d82-ac31-d5100fb81625-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc 
kubenswrapper[5133]: I1121 14:06:20.853605 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/6898e19f-ae5d-4d82-ac31-d5100fb81625-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.853742 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/6898e19f-ae5d-4d82-ac31-d5100fb81625-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.857768 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6898e19f-ae5d-4d82-ac31-d5100fb81625-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.858323 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/6898e19f-ae5d-4d82-ac31-d5100fb81625-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.858495 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/6898e19f-ae5d-4d82-ac31-d5100fb81625-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:20 crc kubenswrapper[5133]: I1121 14:06:20.873854 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9tqd\" (UniqueName: \"kubernetes.io/projected/6898e19f-ae5d-4d82-ac31-d5100fb81625-kube-api-access-p9tqd\") pod \"kube-state-metrics-0\" (UID: \"6898e19f-ae5d-4d82-ac31-d5100fb81625\") " pod="openstack/kube-state-metrics-0" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.018760 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.333388 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.335879 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="ceilometer-central-agent" containerID="cri-o://f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231" gracePeriod=30 Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.336536 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="proxy-httpd" containerID="cri-o://cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187" gracePeriod=30 Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.336652 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="sg-core" containerID="cri-o://07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8" gracePeriod=30 Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.336753 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="ceilometer-notification-agent" containerID="cri-o://5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a" gracePeriod=30 Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.507742 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.673015 5133 generic.go:334] "Generic (PLEG): container finished" podID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerID="219eae399ca3e89586a3268c46d406cf6893ca3257f669a4e8d3da5456cf9a6c" exitCode=0 Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.673155 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5ed3ebfb-e282-425f-b086-f90ed8782d39","Type":"ContainerDied","Data":"219eae399ca3e89586a3268c46d406cf6893ca3257f669a4e8d3da5456cf9a6c"} Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.684280 5133 generic.go:334] "Generic (PLEG): container finished" podID="bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" containerID="e301a655d4c2dfa4770a6ec603f88db379353fde6256c0b5d2cf428fc090a259" exitCode=0 Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.687265 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8","Type":"ContainerDied","Data":"e301a655d4c2dfa4770a6ec603f88db379353fde6256c0b5d2cf428fc090a259"} Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.693072 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-j5d8m"] Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.707972 5133 generic.go:334] "Generic (PLEG): container finished" podID="cf456764-4242-4005-a375-6907d47a6e57" containerID="cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187" exitCode=0 Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.708215 5133 generic.go:334] "Generic (PLEG): container finished" podID="cf456764-4242-4005-a375-6907d47a6e57" containerID="07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8" exitCode=2 
Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.708445 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerDied","Data":"cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187"} Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.708511 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerDied","Data":"07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8"} Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.714984 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"3c061b8f-ed3c-4996-80b5-6e8cfcc18968","Type":"ContainerStarted","Data":"9034068f64d100f9dbba22ce466830bcee037fc02d34cf1a35586017432453ce"} Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.722635 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"6898e19f-ae5d-4d82-ac31-d5100fb81625","Type":"ContainerStarted","Data":"a488b255a3721baac2dcf71a68f1b876f9eb521ee087d1be50f0b508a1f5a287"} Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.733501 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.734862 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.750620 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j5d8m"] Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.756260 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.769033 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.769013133 podStartE2EDuration="2.769013133s" podCreationTimestamp="2025-11-21 14:06:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:21.760910546 +0000 UTC m=+1441.558742794" watchObservedRunningTime="2025-11-21 14:06:21.769013133 +0000 UTC m=+1441.566845381" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.794229 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/243f1648-cb3f-4d5e-ac89-25469a1e9896-utilities\") pod \"community-operators-j5d8m\" (UID: \"243f1648-cb3f-4d5e-ac89-25469a1e9896\") " pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.794708 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/243f1648-cb3f-4d5e-ac89-25469a1e9896-catalog-content\") pod \"community-operators-j5d8m\" (UID: \"243f1648-cb3f-4d5e-ac89-25469a1e9896\") " pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.795549 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbckw\" (UniqueName: \"kubernetes.io/projected/243f1648-cb3f-4d5e-ac89-25469a1e9896-kube-api-access-dbckw\") pod \"community-operators-j5d8m\" (UID: \"243f1648-cb3f-4d5e-ac89-25469a1e9896\") " pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.850430 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.896864 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-combined-ca-bundle\") pod \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.896963 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-config-data\") pod \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.896993 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7kpj\" (UniqueName: \"kubernetes.io/projected/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-kube-api-access-c7kpj\") pod \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\" (UID: \"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8\") " Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.897645 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbckw\" (UniqueName: \"kubernetes.io/projected/243f1648-cb3f-4d5e-ac89-25469a1e9896-kube-api-access-dbckw\") pod \"community-operators-j5d8m\" (UID: \"243f1648-cb3f-4d5e-ac89-25469a1e9896\") " pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.897692 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/243f1648-cb3f-4d5e-ac89-25469a1e9896-utilities\") pod \"community-operators-j5d8m\" (UID: \"243f1648-cb3f-4d5e-ac89-25469a1e9896\") " pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.897745 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/243f1648-cb3f-4d5e-ac89-25469a1e9896-catalog-content\") pod \"community-operators-j5d8m\" (UID: \"243f1648-cb3f-4d5e-ac89-25469a1e9896\") " pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.900929 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/243f1648-cb3f-4d5e-ac89-25469a1e9896-utilities\") pod \"community-operators-j5d8m\" (UID: \"243f1648-cb3f-4d5e-ac89-25469a1e9896\") " pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.901400 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/243f1648-cb3f-4d5e-ac89-25469a1e9896-catalog-content\") pod \"community-operators-j5d8m\" (UID: \"243f1648-cb3f-4d5e-ac89-25469a1e9896\") " pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.911730 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-kube-api-access-c7kpj" (OuterVolumeSpecName: "kube-api-access-c7kpj") pod "bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" (UID: "bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8"). InnerVolumeSpecName "kube-api-access-c7kpj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.926396 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbckw\" (UniqueName: \"kubernetes.io/projected/243f1648-cb3f-4d5e-ac89-25469a1e9896-kube-api-access-dbckw\") pod \"community-operators-j5d8m\" (UID: \"243f1648-cb3f-4d5e-ac89-25469a1e9896\") " pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.953285 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-config-data" (OuterVolumeSpecName: "config-data") pod "bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" (UID: "bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.972943 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" (UID: "bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.998889 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-config-data\") pod \"5ed3ebfb-e282-425f-b086-f90ed8782d39\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.998953 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ed3ebfb-e282-425f-b086-f90ed8782d39-logs\") pod \"5ed3ebfb-e282-425f-b086-f90ed8782d39\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.999132 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-454cg\" (UniqueName: \"kubernetes.io/projected/5ed3ebfb-e282-425f-b086-f90ed8782d39-kube-api-access-454cg\") pod \"5ed3ebfb-e282-425f-b086-f90ed8782d39\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.999217 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-combined-ca-bundle\") pod \"5ed3ebfb-e282-425f-b086-f90ed8782d39\" (UID: \"5ed3ebfb-e282-425f-b086-f90ed8782d39\") " Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.999784 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.999807 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:21 crc kubenswrapper[5133]: I1121 14:06:21.999818 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7kpj\" (UniqueName: \"kubernetes.io/projected/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8-kube-api-access-c7kpj\") on node \"crc\" DevicePath \"\"" 
Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.000319 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ed3ebfb-e282-425f-b086-f90ed8782d39-logs" (OuterVolumeSpecName: "logs") pod "5ed3ebfb-e282-425f-b086-f90ed8782d39" (UID: "5ed3ebfb-e282-425f-b086-f90ed8782d39"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.006273 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ed3ebfb-e282-425f-b086-f90ed8782d39-kube-api-access-454cg" (OuterVolumeSpecName: "kube-api-access-454cg") pod "5ed3ebfb-e282-425f-b086-f90ed8782d39" (UID: "5ed3ebfb-e282-425f-b086-f90ed8782d39"). InnerVolumeSpecName "kube-api-access-454cg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.034936 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ed3ebfb-e282-425f-b086-f90ed8782d39" (UID: "5ed3ebfb-e282-425f-b086-f90ed8782d39"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.045250 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-config-data" (OuterVolumeSpecName: "config-data") pod "5ed3ebfb-e282-425f-b086-f90ed8782d39" (UID: "5ed3ebfb-e282-425f-b086-f90ed8782d39"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.074393 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.101732 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.101789 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ed3ebfb-e282-425f-b086-f90ed8782d39-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.101802 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5ed3ebfb-e282-425f-b086-f90ed8782d39-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.101814 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-454cg\" (UniqueName: \"kubernetes.io/projected/5ed3ebfb-e282-425f-b086-f90ed8782d39-kube-api-access-454cg\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.476294 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="866a2674-f006-4883-9598-6902879561e6" path="/var/lib/kubelet/pods/866a2674-f006-4883-9598-6902879561e6/volumes" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.720915 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j5d8m"] Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.738187 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"6898e19f-ae5d-4d82-ac31-d5100fb81625","Type":"ContainerStarted","Data":"d198d07d1d82f66d2fa5487e19ac980820434e7ddbb851a2fdb2d145983d3ae6"} Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.738598 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.740792 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5d8m" event={"ID":"243f1648-cb3f-4d5e-ac89-25469a1e9896","Type":"ContainerStarted","Data":"e7b66dd691367c30a22f25d564537ad79c39799baaf9f8b214f02829ca326b0d"} Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.745888 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.745885 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5ed3ebfb-e282-425f-b086-f90ed8782d39","Type":"ContainerDied","Data":"fc2e4a7ab48cf945ddb5677dd5ef890d706cf21143416c556db7084f4b3815da"} Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.745986 5133 scope.go:117] "RemoveContainer" containerID="219eae399ca3e89586a3268c46d406cf6893ca3257f669a4e8d3da5456cf9a6c" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.749358 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8","Type":"ContainerDied","Data":"be71da5dbc91b97af93bd6df6f57cd34cab4d7448c0e2cd3bf5bfd06fde69ab7"} Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.749422 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.761064 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.33063456 podStartE2EDuration="2.761036974s" podCreationTimestamp="2025-11-21 14:06:20 +0000 UTC" firstStartedPulling="2025-11-21 14:06:21.516029597 +0000 UTC m=+1441.313861845" lastFinishedPulling="2025-11-21 14:06:21.946432011 +0000 UTC m=+1441.744264259" observedRunningTime="2025-11-21 14:06:22.75411472 +0000 UTC m=+1442.551946978" watchObservedRunningTime="2025-11-21 14:06:22.761036974 +0000 UTC m=+1442.558869222" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.766406 5133 generic.go:334] "Generic (PLEG): container finished" podID="cf456764-4242-4005-a375-6907d47a6e57" containerID="f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231" exitCode=0 Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.766617 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerDied","Data":"f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231"} Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.780667 5133 scope.go:117] "RemoveContainer" containerID="502e1733fbfa31392cd21daf95af8efcdb946ce979257b4b5bd795bf39e718d1" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.801914 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.829896 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.855938 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:06:22 crc kubenswrapper[5133]: E1121 14:06:22.856636 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-log" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.856661 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-log" Nov 21 14:06:22 crc kubenswrapper[5133]: E1121 14:06:22.856685 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-api" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.856693 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-api" Nov 21 14:06:22 crc kubenswrapper[5133]: E1121 14:06:22.856717 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" containerName="nova-scheduler-scheduler" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.856724 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" containerName="nova-scheduler-scheduler" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.856915 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-api" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.856932 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" containerName="nova-scheduler-scheduler" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.856951 5133 
memory_manager.go:354] "RemoveStaleState removing state" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" containerName="nova-api-log" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.857825 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.864980 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.880334 5133 scope.go:117] "RemoveContainer" containerID="e301a655d4c2dfa4770a6ec603f88db379353fde6256c0b5d2cf428fc090a259" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.882567 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.890269 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.905202 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.906804 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.909146 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.912201 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 14:06:22 crc kubenswrapper[5133]: I1121 14:06:22.917782 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.037549 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f45677f-30ca-4b41-b51b-7f78f43aca35-logs\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.037639 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-config-data\") pod \"nova-scheduler-0\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.037674 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.037719 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xjmf\" (UniqueName: \"kubernetes.io/projected/ed15e88e-868f-4a35-9966-6d486325db91-kube-api-access-2xjmf\") pod \"nova-scheduler-0\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.037755 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-config-data\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") 
" pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.037838 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbrf4\" (UniqueName: \"kubernetes.io/projected/4f45677f-30ca-4b41-b51b-7f78f43aca35-kube-api-access-qbrf4\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.037888 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.140115 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f45677f-30ca-4b41-b51b-7f78f43aca35-logs\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.140183 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-config-data\") pod \"nova-scheduler-0\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.140209 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.140239 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xjmf\" (UniqueName: \"kubernetes.io/projected/ed15e88e-868f-4a35-9966-6d486325db91-kube-api-access-2xjmf\") pod \"nova-scheduler-0\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.140264 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-config-data\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.140316 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbrf4\" (UniqueName: \"kubernetes.io/projected/4f45677f-30ca-4b41-b51b-7f78f43aca35-kube-api-access-qbrf4\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.140349 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.140677 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f45677f-30ca-4b41-b51b-7f78f43aca35-logs\") pod 
\"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.146623 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.146697 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.146912 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-config-data\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.147699 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-config-data\") pod \"nova-scheduler-0\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.159228 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xjmf\" (UniqueName: \"kubernetes.io/projected/ed15e88e-868f-4a35-9966-6d486325db91-kube-api-access-2xjmf\") pod \"nova-scheduler-0\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.163321 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbrf4\" (UniqueName: \"kubernetes.io/projected/4f45677f-30ca-4b41-b51b-7f78f43aca35-kube-api-access-qbrf4\") pod \"nova-api-0\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.197402 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.224448 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.310485 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.310551 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.789372 5133 generic.go:334] "Generic (PLEG): container finished" podID="243f1648-cb3f-4d5e-ac89-25469a1e9896" containerID="2e430da6c4392530042399d7787cf93e47a9ad9129fee6078865306eb90eb1c6" exitCode=0 Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.790344 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5d8m" event={"ID":"243f1648-cb3f-4d5e-ac89-25469a1e9896","Type":"ContainerDied","Data":"2e430da6c4392530042399d7787cf93e47a9ad9129fee6078865306eb90eb1c6"} Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.819252 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.875749 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ed15e88e-868f-4a35-9966-6d486325db91","Type":"ContainerStarted","Data":"d47d79ccad21ed3f086e9cec925782891cad7a0b50f3c3ea5f72816db7126d12"} Nov 21 14:06:23 crc kubenswrapper[5133]: I1121 14:06:23.881316 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.027182 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.027332 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.487543 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ed3ebfb-e282-425f-b086-f90ed8782d39" path="/var/lib/kubelet/pods/5ed3ebfb-e282-425f-b086-f90ed8782d39/volumes" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.488823 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8" path="/var/lib/kubelet/pods/bc5ef4e0-ce6d-4519-b4fc-b7aa0d04c8a8/volumes" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.873468 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.884501 5133 generic.go:334] "Generic (PLEG): container finished" podID="cf456764-4242-4005-a375-6907d47a6e57" containerID="5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a" exitCode=0 Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.884574 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerDied","Data":"5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a"} Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.884605 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf456764-4242-4005-a375-6907d47a6e57","Type":"ContainerDied","Data":"8fe5d391d6d95072bfa6fb1d07f13bfdb550e674ff1726bb7bb6a81c379f164b"} Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.884624 5133 scope.go:117] "RemoveContainer" containerID="cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.884745 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.888184 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ed15e88e-868f-4a35-9966-6d486325db91","Type":"ContainerStarted","Data":"f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585"} Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.892114 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f45677f-30ca-4b41-b51b-7f78f43aca35","Type":"ContainerStarted","Data":"3fb7d9bb90d270d6a8c13ea0147a9db8ad659df6f8a7fcfdd90388a0af0b1e33"} Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.892346 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f45677f-30ca-4b41-b51b-7f78f43aca35","Type":"ContainerStarted","Data":"b006b0e429049c30fabb4a139e775063545ccd842780064378dd54f62d8065b3"} Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.892364 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f45677f-30ca-4b41-b51b-7f78f43aca35","Type":"ContainerStarted","Data":"b8a5866be3fc60754ebdbc975c13156fcd27bdee98a740a43e31c3889cbb5956"} Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.923797 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.923761779 podStartE2EDuration="2.923761779s" podCreationTimestamp="2025-11-21 14:06:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:24.922881645 +0000 UTC m=+1444.720713893" watchObservedRunningTime="2025-11-21 14:06:24.923761779 +0000 UTC m=+1444.721594027" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.932330 5133 scope.go:117] "RemoveContainer" containerID="07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.960112 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.9599955060000003 podStartE2EDuration="2.959995506s" podCreationTimestamp="2025-11-21 14:06:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:24.954291204 +0000 UTC m=+1444.752123452" watchObservedRunningTime="2025-11-21 14:06:24.959995506 +0000 UTC m=+1444.757827754" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.962544 5133 scope.go:117] "RemoveContainer" containerID="5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.985483 5133 scope.go:117] "RemoveContainer" containerID="f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231" Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.995774 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blwcb\" (UniqueName: \"kubernetes.io/projected/cf456764-4242-4005-a375-6907d47a6e57-kube-api-access-blwcb\") pod \"cf456764-4242-4005-a375-6907d47a6e57\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.995828 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-combined-ca-bundle\") pod \"cf456764-4242-4005-a375-6907d47a6e57\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.995895 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-run-httpd\") pod \"cf456764-4242-4005-a375-6907d47a6e57\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.996063 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-scripts\") pod \"cf456764-4242-4005-a375-6907d47a6e57\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.996128 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-log-httpd\") pod \"cf456764-4242-4005-a375-6907d47a6e57\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.996159 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-sg-core-conf-yaml\") pod \"cf456764-4242-4005-a375-6907d47a6e57\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " Nov 21 14:06:24 crc kubenswrapper[5133]: I1121 14:06:24.996187 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-config-data\") pod \"cf456764-4242-4005-a375-6907d47a6e57\" (UID: \"cf456764-4242-4005-a375-6907d47a6e57\") " Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:24.999865 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cf456764-4242-4005-a375-6907d47a6e57" (UID: "cf456764-4242-4005-a375-6907d47a6e57"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.000048 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cf456764-4242-4005-a375-6907d47a6e57" (UID: "cf456764-4242-4005-a375-6907d47a6e57"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.006654 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf456764-4242-4005-a375-6907d47a6e57-kube-api-access-blwcb" (OuterVolumeSpecName: "kube-api-access-blwcb") pod "cf456764-4242-4005-a375-6907d47a6e57" (UID: "cf456764-4242-4005-a375-6907d47a6e57"). InnerVolumeSpecName "kube-api-access-blwcb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.011396 5133 scope.go:117] "RemoveContainer" containerID="cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187" Nov 21 14:06:25 crc kubenswrapper[5133]: E1121 14:06:25.013135 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187\": container with ID starting with cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187 not found: ID does not exist" containerID="cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.013174 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187"} err="failed to get container status \"cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187\": rpc error: code = NotFound desc = could not find container \"cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187\": container with ID starting with cf3c0ec71b455d21002e500e92efd309e8fbb304f5a67165da473c8bc2860187 not found: ID does not exist" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.013200 5133 scope.go:117] "RemoveContainer" containerID="07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8" Nov 21 14:06:25 crc kubenswrapper[5133]: E1121 14:06:25.014828 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8\": container with ID starting with 07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8 not found: ID does not exist" containerID="07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.014874 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8"} err="failed to get container status \"07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8\": rpc error: code = NotFound desc = could not find container \"07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8\": container with ID starting with 07a1cace95003875fe4bb26916a74fa8cec4803a8a98d974040e016f7fa755d8 not found: ID does not exist" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.014893 5133 scope.go:117] "RemoveContainer" 
containerID="5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a" Nov 21 14:06:25 crc kubenswrapper[5133]: E1121 14:06:25.015851 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a\": container with ID starting with 5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a not found: ID does not exist" containerID="5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.015884 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a"} err="failed to get container status \"5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a\": rpc error: code = NotFound desc = could not find container \"5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a\": container with ID starting with 5ec1d1505d36a49fc8295d4c19fa8aacd8985140309a4b1ef1bd7e3087b8651a not found: ID does not exist" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.015909 5133 scope.go:117] "RemoveContainer" containerID="f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231" Nov 21 14:06:25 crc kubenswrapper[5133]: E1121 14:06:25.020112 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231\": container with ID starting with f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231 not found: ID does not exist" containerID="f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.020140 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231"} err="failed to get container status \"f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231\": rpc error: code = NotFound desc = could not find container \"f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231\": container with ID starting with f8f89aac00239bf2eb080f241d851d4bf7ebe3de6804e4ae7255d87adb7cb231 not found: ID does not exist" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.023778 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-scripts" (OuterVolumeSpecName: "scripts") pod "cf456764-4242-4005-a375-6907d47a6e57" (UID: "cf456764-4242-4005-a375-6907d47a6e57"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.036287 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.040291 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cf456764-4242-4005-a375-6907d47a6e57" (UID: "cf456764-4242-4005-a375-6907d47a6e57"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.083405 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf456764-4242-4005-a375-6907d47a6e57" (UID: "cf456764-4242-4005-a375-6907d47a6e57"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.098828 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.098861 5133 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.098870 5133 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.098880 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blwcb\" (UniqueName: \"kubernetes.io/projected/cf456764-4242-4005-a375-6907d47a6e57-kube-api-access-blwcb\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.098888 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.098896 5133 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf456764-4242-4005-a375-6907d47a6e57-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.122378 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-config-data" (OuterVolumeSpecName: "config-data") pod "cf456764-4242-4005-a375-6907d47a6e57" (UID: "cf456764-4242-4005-a375-6907d47a6e57"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.201233 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf456764-4242-4005-a375-6907d47a6e57-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.230695 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.251387 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.274069 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:25 crc kubenswrapper[5133]: E1121 14:06:25.274641 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="sg-core" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.274661 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="sg-core" Nov 21 14:06:25 crc kubenswrapper[5133]: E1121 14:06:25.274682 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="ceilometer-central-agent" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.274690 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="ceilometer-central-agent" Nov 21 14:06:25 crc kubenswrapper[5133]: E1121 14:06:25.274727 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="ceilometer-notification-agent" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.274736 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="ceilometer-notification-agent" Nov 21 14:06:25 crc kubenswrapper[5133]: E1121 14:06:25.274764 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="proxy-httpd" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.274772 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="proxy-httpd" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.275098 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="ceilometer-notification-agent" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.275113 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="ceilometer-central-agent" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.275145 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="proxy-httpd" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.275158 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf456764-4242-4005-a375-6907d47a6e57" containerName="sg-core" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.277289 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.279128 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.279512 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.279772 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.280361 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.405014 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-config-data\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.405327 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4x28\" (UniqueName: \"kubernetes.io/projected/de7cad07-b950-4609-b720-18e65526b93c-kube-api-access-j4x28\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.405463 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.405537 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.405612 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-log-httpd\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.405704 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.405801 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-scripts\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.405928 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-run-httpd\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.507907 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-scripts\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.508021 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-run-httpd\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.508050 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-config-data\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.508076 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4x28\" (UniqueName: \"kubernetes.io/projected/de7cad07-b950-4609-b720-18e65526b93c-kube-api-access-j4x28\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.508121 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.508144 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.508166 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-log-httpd\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.508183 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.508511 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-run-httpd\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.508725 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-log-httpd\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.512188 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.512364 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-scripts\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.512466 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.513308 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-config-data\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.520209 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.526191 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4x28\" (UniqueName: \"kubernetes.io/projected/de7cad07-b950-4609-b720-18e65526b93c-kube-api-access-j4x28\") pod \"ceilometer-0\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " pod="openstack/ceilometer-0" Nov 21 14:06:25 crc kubenswrapper[5133]: I1121 14:06:25.604039 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:06:26 crc kubenswrapper[5133]: I1121 14:06:26.148702 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:26 crc kubenswrapper[5133]: I1121 14:06:26.482503 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf456764-4242-4005-a375-6907d47a6e57" path="/var/lib/kubelet/pods/cf456764-4242-4005-a375-6907d47a6e57/volumes" Nov 21 14:06:26 crc kubenswrapper[5133]: I1121 14:06:26.915451 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerStarted","Data":"dd86b111685211ab293aa2262a44ab1bbc05a1539d44dc44687270f9ecd0dcac"} Nov 21 14:06:26 crc kubenswrapper[5133]: I1121 14:06:26.915918 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerStarted","Data":"95b80841dfc4e7272048d5fabaf5afb54835756c988c90d70b1c7699ca534498"} Nov 21 14:06:28 crc kubenswrapper[5133]: I1121 14:06:28.198495 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 21 14:06:29 crc kubenswrapper[5133]: I1121 14:06:29.027831 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 14:06:29 crc kubenswrapper[5133]: I1121 14:06:29.029610 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 14:06:29 crc kubenswrapper[5133]: I1121 14:06:29.955943 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerStarted","Data":"467e748fd5f5dcd1f05f552cda96a1f1e759e9c4a404dcfa043b077c527bb7ce"} Nov 21 14:06:29 crc kubenswrapper[5133]: I1121 14:06:29.958330 5133 generic.go:334] "Generic (PLEG): container finished" podID="243f1648-cb3f-4d5e-ac89-25469a1e9896" containerID="055c3834ce1a19309bbdc7b586a8c6e03df3403eb5e983a76b4e59a624493c6c" exitCode=0 Nov 21 14:06:29 crc kubenswrapper[5133]: I1121 14:06:29.958388 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5d8m" event={"ID":"243f1648-cb3f-4d5e-ac89-25469a1e9896","Type":"ContainerDied","Data":"055c3834ce1a19309bbdc7b586a8c6e03df3403eb5e983a76b4e59a624493c6c"} Nov 21 14:06:30 crc kubenswrapper[5133]: I1121 14:06:30.064230 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.172:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 14:06:30 crc kubenswrapper[5133]: I1121 14:06:30.064259 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.172:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 14:06:31 crc kubenswrapper[5133]: I1121 14:06:31.039565 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 21 14:06:31 crc kubenswrapper[5133]: I1121 14:06:31.980977 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerStarted","Data":"2cb83daf26e32070f045638ac983786a894ecb9b97fa9fc1185e54f154e16d96"} Nov 21 14:06:31 crc kubenswrapper[5133]: I1121 14:06:31.984389 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-j5d8m" event={"ID":"243f1648-cb3f-4d5e-ac89-25469a1e9896","Type":"ContainerStarted","Data":"a7f291fe669cd18139fe42e645fe577e235da4045c6a08429fc91a31e01ac8f5"} Nov 21 14:06:32 crc kubenswrapper[5133]: I1121 14:06:32.013285 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-j5d8m" podStartSLOduration=3.616250636 podStartE2EDuration="11.01326155s" podCreationTimestamp="2025-11-21 14:06:21 +0000 UTC" firstStartedPulling="2025-11-21 14:06:23.818171684 +0000 UTC m=+1443.616003932" lastFinishedPulling="2025-11-21 14:06:31.215182598 +0000 UTC m=+1451.013014846" observedRunningTime="2025-11-21 14:06:32.008667658 +0000 UTC m=+1451.806499916" watchObservedRunningTime="2025-11-21 14:06:32.01326155 +0000 UTC m=+1451.811093818" Nov 21 14:06:32 crc kubenswrapper[5133]: I1121 14:06:32.075427 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:32 crc kubenswrapper[5133]: I1121 14:06:32.075494 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:32 crc kubenswrapper[5133]: I1121 14:06:32.998824 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerStarted","Data":"a24df4d6de3085c7fae9776a6da29ea5f14bdcdd0b34744d728905f39088343c"} Nov 21 14:06:32 crc kubenswrapper[5133]: I1121 14:06:32.999252 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 14:06:33 crc kubenswrapper[5133]: I1121 14:06:33.024409 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.887436828 podStartE2EDuration="8.024384802s" podCreationTimestamp="2025-11-21 14:06:25 +0000 UTC" firstStartedPulling="2025-11-21 14:06:26.159505189 +0000 UTC m=+1445.957337447" lastFinishedPulling="2025-11-21 14:06:32.296453173 +0000 UTC m=+1452.094285421" observedRunningTime="2025-11-21 14:06:33.020394626 +0000 UTC m=+1452.818226884" watchObservedRunningTime="2025-11-21 14:06:33.024384802 +0000 UTC m=+1452.822217040" Nov 21 14:06:33 crc kubenswrapper[5133]: I1121 14:06:33.126622 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-j5d8m" podUID="243f1648-cb3f-4d5e-ac89-25469a1e9896" containerName="registry-server" probeResult="failure" output=< Nov 21 14:06:33 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 14:06:33 crc kubenswrapper[5133]: > Nov 21 14:06:33 crc kubenswrapper[5133]: I1121 14:06:33.197596 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 21 14:06:33 crc kubenswrapper[5133]: I1121 14:06:33.225860 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 14:06:33 crc kubenswrapper[5133]: I1121 14:06:33.225913 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 14:06:33 crc kubenswrapper[5133]: I1121 14:06:33.234014 5133 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 21 14:06:34 crc kubenswrapper[5133]: I1121 14:06:34.037432 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 21 14:06:34 crc kubenswrapper[5133]: I1121 14:06:34.308224 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.177:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 14:06:34 crc kubenswrapper[5133]: I1121 14:06:34.308286 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.177:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 14:06:39 crc kubenswrapper[5133]: I1121 14:06:39.035630 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 21 14:06:39 crc kubenswrapper[5133]: I1121 14:06:39.036766 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 21 14:06:39 crc kubenswrapper[5133]: I1121 14:06:39.043144 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 14:06:39 crc kubenswrapper[5133]: I1121 14:06:39.264481 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 14:06:40 crc kubenswrapper[5133]: I1121 14:06:40.882520 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:40 crc kubenswrapper[5133]: I1121 14:06:40.958545 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnczp\" (UniqueName: \"kubernetes.io/projected/614d147a-e367-4799-aa80-9704f71aa40c-kube-api-access-mnczp\") pod \"614d147a-e367-4799-aa80-9704f71aa40c\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " Nov 21 14:06:40 crc kubenswrapper[5133]: I1121 14:06:40.958787 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-combined-ca-bundle\") pod \"614d147a-e367-4799-aa80-9704f71aa40c\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " Nov 21 14:06:40 crc kubenswrapper[5133]: I1121 14:06:40.958868 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-config-data\") pod \"614d147a-e367-4799-aa80-9704f71aa40c\" (UID: \"614d147a-e367-4799-aa80-9704f71aa40c\") " Nov 21 14:06:40 crc kubenswrapper[5133]: I1121 14:06:40.967349 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/614d147a-e367-4799-aa80-9704f71aa40c-kube-api-access-mnczp" (OuterVolumeSpecName: "kube-api-access-mnczp") pod "614d147a-e367-4799-aa80-9704f71aa40c" (UID: "614d147a-e367-4799-aa80-9704f71aa40c"). InnerVolumeSpecName "kube-api-access-mnczp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:40 crc kubenswrapper[5133]: I1121 14:06:40.990026 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-config-data" (OuterVolumeSpecName: "config-data") pod "614d147a-e367-4799-aa80-9704f71aa40c" (UID: "614d147a-e367-4799-aa80-9704f71aa40c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:40 crc kubenswrapper[5133]: I1121 14:06:40.995151 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "614d147a-e367-4799-aa80-9704f71aa40c" (UID: "614d147a-e367-4799-aa80-9704f71aa40c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.061314 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.061356 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/614d147a-e367-4799-aa80-9704f71aa40c-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.061364 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnczp\" (UniqueName: \"kubernetes.io/projected/614d147a-e367-4799-aa80-9704f71aa40c-kube-api-access-mnczp\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.090940 5133 generic.go:334] "Generic (PLEG): container finished" podID="614d147a-e367-4799-aa80-9704f71aa40c" containerID="b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764" exitCode=137 Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.091043 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"614d147a-e367-4799-aa80-9704f71aa40c","Type":"ContainerDied","Data":"b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764"} Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.091107 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"614d147a-e367-4799-aa80-9704f71aa40c","Type":"ContainerDied","Data":"8e3e99709e5e09c3d5986b3b1028951d8c0950df204a4973fc8f1b3cac84e6c6"} Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.091128 5133 scope.go:117] "RemoveContainer" containerID="b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.091173 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.136295 5133 scope.go:117] "RemoveContainer" containerID="b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764" Nov 21 14:06:41 crc kubenswrapper[5133]: E1121 14:06:41.136777 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764\": container with ID starting with b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764 not found: ID does not exist" containerID="b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.136808 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764"} err="failed to get container status \"b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764\": rpc error: code = NotFound desc = could not find container \"b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764\": container with ID starting with b8fd34c5dc992e0eb7bb21ab6158be69062144a41f4b2edfa5dc13bec0816764 not found: ID does not exist" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.149064 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.157316 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.177432 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 14:06:41 crc kubenswrapper[5133]: E1121 14:06:41.179952 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="614d147a-e367-4799-aa80-9704f71aa40c" containerName="nova-cell1-novncproxy-novncproxy" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.179991 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="614d147a-e367-4799-aa80-9704f71aa40c" containerName="nova-cell1-novncproxy-novncproxy" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.180383 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="614d147a-e367-4799-aa80-9704f71aa40c" containerName="nova-cell1-novncproxy-novncproxy" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.183898 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.186705 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.187041 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.187867 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.195308 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.268970 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px75m\" (UniqueName: \"kubernetes.io/projected/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-kube-api-access-px75m\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.269597 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.269753 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.269809 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.269837 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.372382 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px75m\" (UniqueName: \"kubernetes.io/projected/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-kube-api-access-px75m\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.372465 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 
21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.372802 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.372883 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.372918 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.377941 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.378889 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.379903 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.380644 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.390976 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px75m\" (UniqueName: \"kubernetes.io/projected/f1c57f0d-b96c-415b-9f2f-30a8ba4188a1-kube-api-access-px75m\") pod \"nova-cell1-novncproxy-0\" (UID: \"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1\") " pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:41 crc kubenswrapper[5133]: I1121 14:06:41.514785 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:42 crc kubenswrapper[5133]: W1121 14:06:42.004055 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1c57f0d_b96c_415b_9f2f_30a8ba4188a1.slice/crio-b61f2a7b7ec87fff3e67bf0aa5808c0110d1e2be7a630fe49ac0678b42f0ec1b WatchSource:0}: Error finding container b61f2a7b7ec87fff3e67bf0aa5808c0110d1e2be7a630fe49ac0678b42f0ec1b: Status 404 returned error can't find the container with id b61f2a7b7ec87fff3e67bf0aa5808c0110d1e2be7a630fe49ac0678b42f0ec1b Nov 21 14:06:42 crc kubenswrapper[5133]: I1121 14:06:42.005740 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 21 14:06:42 crc kubenswrapper[5133]: I1121 14:06:42.101889 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1","Type":"ContainerStarted","Data":"b61f2a7b7ec87fff3e67bf0aa5808c0110d1e2be7a630fe49ac0678b42f0ec1b"} Nov 21 14:06:42 crc kubenswrapper[5133]: I1121 14:06:42.137025 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:42 crc kubenswrapper[5133]: I1121 14:06:42.193709 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-j5d8m" Nov 21 14:06:42 crc kubenswrapper[5133]: I1121 14:06:42.277433 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-j5d8m"] Nov 21 14:06:42 crc kubenswrapper[5133]: I1121 14:06:42.381236 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gwctl"] Nov 21 14:06:42 crc kubenswrapper[5133]: I1121 14:06:42.381554 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gwctl" podUID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerName="registry-server" containerID="cri-o://d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139" gracePeriod=2 Nov 21 14:06:42 crc kubenswrapper[5133]: I1121 14:06:42.470779 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="614d147a-e367-4799-aa80-9704f71aa40c" path="/var/lib/kubelet/pods/614d147a-e367-4799-aa80-9704f71aa40c/volumes" Nov 21 14:06:42 crc kubenswrapper[5133]: I1121 14:06:42.899354 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gwctl" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.033747 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl6sc\" (UniqueName: \"kubernetes.io/projected/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-kube-api-access-bl6sc\") pod \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.033838 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-utilities\") pod \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.033881 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-catalog-content\") pod \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\" (UID: \"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842\") " Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.035413 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-utilities" (OuterVolumeSpecName: "utilities") pod "d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" (UID: "d35fdb2a-b5cc-4bdb-84c9-d447c6e86842"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.043254 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-kube-api-access-bl6sc" (OuterVolumeSpecName: "kube-api-access-bl6sc") pod "d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" (UID: "d35fdb2a-b5cc-4bdb-84c9-d447c6e86842"). InnerVolumeSpecName "kube-api-access-bl6sc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.084083 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" (UID: "d35fdb2a-b5cc-4bdb-84c9-d447c6e86842"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.115939 5133 generic.go:334] "Generic (PLEG): container finished" podID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerID="d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139" exitCode=0 Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.116086 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwctl" event={"ID":"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842","Type":"ContainerDied","Data":"d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139"} Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.116123 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gwctl" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.116214 5133 scope.go:117] "RemoveContainer" containerID="d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.116193 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gwctl" event={"ID":"d35fdb2a-b5cc-4bdb-84c9-d447c6e86842","Type":"ContainerDied","Data":"dd25a4e4e1f51e0475fb6b1fb44c41216328ba8a0472307e6f8c1e19a4c1f4d4"} Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.117816 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"f1c57f0d-b96c-415b-9f2f-30a8ba4188a1","Type":"ContainerStarted","Data":"812bdd4b76d4c3b6a35b701ac5989e12690f3f9fa7c3fe47679c3bdf11d0c066"} Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.138932 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl6sc\" (UniqueName: \"kubernetes.io/projected/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-kube-api-access-bl6sc\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.138966 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.138976 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.153342 5133 scope.go:117] "RemoveContainer" containerID="9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.158721 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.158695034 podStartE2EDuration="2.158695034s" podCreationTimestamp="2025-11-21 14:06:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:43.146992262 +0000 UTC m=+1462.944824510" watchObservedRunningTime="2025-11-21 14:06:43.158695034 +0000 UTC m=+1462.956527272" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.174694 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gwctl"] Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.181838 5133 scope.go:117] "RemoveContainer" containerID="12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.182727 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gwctl"] Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.200878 5133 scope.go:117] "RemoveContainer" containerID="d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139" Nov 21 14:06:43 crc kubenswrapper[5133]: E1121 14:06:43.201700 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139\": container with ID starting with d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139 not found: ID does not exist" 
containerID="d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.201751 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139"} err="failed to get container status \"d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139\": rpc error: code = NotFound desc = could not find container \"d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139\": container with ID starting with d4f1d4992eb30f43a9970c10af9422be9363628f9270e9f136ef49ce734d5139 not found: ID does not exist" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.201790 5133 scope.go:117] "RemoveContainer" containerID="9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb" Nov 21 14:06:43 crc kubenswrapper[5133]: E1121 14:06:43.202112 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb\": container with ID starting with 9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb not found: ID does not exist" containerID="9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.202153 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb"} err="failed to get container status \"9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb\": rpc error: code = NotFound desc = could not find container \"9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb\": container with ID starting with 9834329efa71ddf7b6fd0a5ffea13c1a94183aa59ca056300ab8eeda2e4ea3bb not found: ID does not exist" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.202181 5133 scope.go:117] "RemoveContainer" containerID="12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575" Nov 21 14:06:43 crc kubenswrapper[5133]: E1121 14:06:43.202414 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575\": container with ID starting with 12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575 not found: ID does not exist" containerID="12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.202429 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575"} err="failed to get container status \"12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575\": rpc error: code = NotFound desc = could not find container \"12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575\": container with ID starting with 12258c9f04ca0b87cd791bdb5a5b2aaac12902c58960591bd1bdff68d30f5575 not found: ID does not exist" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.229400 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.229864 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.231575 5133 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 14:06:43 crc kubenswrapper[5133]: I1121 14:06:43.232812 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.129759 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.134134 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.318905 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-qk78s"] Nov 21 14:06:44 crc kubenswrapper[5133]: E1121 14:06:44.319524 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerName="extract-content" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.319554 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerName="extract-content" Nov 21 14:06:44 crc kubenswrapper[5133]: E1121 14:06:44.319572 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerName="extract-utilities" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.319582 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerName="extract-utilities" Nov 21 14:06:44 crc kubenswrapper[5133]: E1121 14:06:44.319593 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerName="registry-server" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.319602 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerName="registry-server" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.319894 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" containerName="registry-server" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.321308 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.341280 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-qk78s"] Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.470043 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-config\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.470043 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d35fdb2a-b5cc-4bdb-84c9-d447c6e86842" path="/var/lib/kubelet/pods/d35fdb2a-b5cc-4bdb-84c9-d447c6e86842/volumes" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.470357 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-nb\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.470460 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-dns-svc\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.470548 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-sb\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.470666 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njwbp\" (UniqueName: \"kubernetes.io/projected/33fa8cc5-ffef-4e3b-8816-e643c09ff259-kube-api-access-njwbp\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.571772 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-sb\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.571855 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njwbp\" (UniqueName: \"kubernetes.io/projected/33fa8cc5-ffef-4e3b-8816-e643c09ff259-kube-api-access-njwbp\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.571912 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-config\") pod 
\"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.572011 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-nb\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.572035 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-dns-svc\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.572590 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-sb\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.572606 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-dns-svc\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.573248 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-config\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.575890 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-nb\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.606716 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njwbp\" (UniqueName: \"kubernetes.io/projected/33fa8cc5-ffef-4e3b-8816-e643c09ff259-kube-api-access-njwbp\") pod \"dnsmasq-dns-68d4b6d797-qk78s\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:44 crc kubenswrapper[5133]: I1121 14:06:44.644190 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:45 crc kubenswrapper[5133]: W1121 14:06:45.156246 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33fa8cc5_ffef_4e3b_8816_e643c09ff259.slice/crio-9fecb190a31ef82c0080ee668495979969d6f7b60a70209a50aa20d57946fe23 WatchSource:0}: Error finding container 9fecb190a31ef82c0080ee668495979969d6f7b60a70209a50aa20d57946fe23: Status 404 returned error can't find the container with id 9fecb190a31ef82c0080ee668495979969d6f7b60a70209a50aa20d57946fe23 Nov 21 14:06:45 crc kubenswrapper[5133]: I1121 14:06:45.158709 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-qk78s"] Nov 21 14:06:46 crc kubenswrapper[5133]: I1121 14:06:46.151728 5133 generic.go:334] "Generic (PLEG): container finished" podID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" containerID="4a0d33d8e7e2e22cec3c93bff1b15a28cb06bfde2d35e28a475554e37986c5ee" exitCode=0 Nov 21 14:06:46 crc kubenswrapper[5133]: I1121 14:06:46.151835 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" event={"ID":"33fa8cc5-ffef-4e3b-8816-e643c09ff259","Type":"ContainerDied","Data":"4a0d33d8e7e2e22cec3c93bff1b15a28cb06bfde2d35e28a475554e37986c5ee"} Nov 21 14:06:46 crc kubenswrapper[5133]: I1121 14:06:46.152335 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" event={"ID":"33fa8cc5-ffef-4e3b-8816-e643c09ff259","Type":"ContainerStarted","Data":"9fecb190a31ef82c0080ee668495979969d6f7b60a70209a50aa20d57946fe23"} Nov 21 14:06:46 crc kubenswrapper[5133]: I1121 14:06:46.515674 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.062984 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.063736 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="ceilometer-central-agent" containerID="cri-o://dd86b111685211ab293aa2262a44ab1bbc05a1539d44dc44687270f9ecd0dcac" gracePeriod=30 Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.063839 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="sg-core" containerID="cri-o://2cb83daf26e32070f045638ac983786a894ecb9b97fa9fc1185e54f154e16d96" gracePeriod=30 Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.063856 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="proxy-httpd" containerID="cri-o://a24df4d6de3085c7fae9776a6da29ea5f14bdcdd0b34744d728905f39088343c" gracePeriod=30 Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.063968 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="ceilometer-notification-agent" containerID="cri-o://467e748fd5f5dcd1f05f552cda96a1f1e759e9c4a404dcfa043b077c527bb7ce" gracePeriod=30 Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.072207 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" 
podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.178:3000/\": read tcp 10.217.0.2:45932->10.217.0.178:3000: read: connection reset by peer" Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.163532 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" event={"ID":"33fa8cc5-ffef-4e3b-8816-e643c09ff259","Type":"ContainerStarted","Data":"9d8af020fcabe90d00acb5ecde08a3014609bb733173a3643bb6fa952f36c8f8"} Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.163853 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.189862 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" podStartSLOduration=3.189845574 podStartE2EDuration="3.189845574s" podCreationTimestamp="2025-11-21 14:06:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:47.185050946 +0000 UTC m=+1466.982883204" watchObservedRunningTime="2025-11-21 14:06:47.189845574 +0000 UTC m=+1466.987677832" Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.750354 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.750973 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-log" containerID="cri-o://b006b0e429049c30fabb4a139e775063545ccd842780064378dd54f62d8065b3" gracePeriod=30 Nov 21 14:06:47 crc kubenswrapper[5133]: I1121 14:06:47.751162 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-api" containerID="cri-o://3fb7d9bb90d270d6a8c13ea0147a9db8ad659df6f8a7fcfdd90388a0af0b1e33" gracePeriod=30 Nov 21 14:06:48 crc kubenswrapper[5133]: I1121 14:06:48.174474 5133 generic.go:334] "Generic (PLEG): container finished" podID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerID="b006b0e429049c30fabb4a139e775063545ccd842780064378dd54f62d8065b3" exitCode=143 Nov 21 14:06:48 crc kubenswrapper[5133]: I1121 14:06:48.174565 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f45677f-30ca-4b41-b51b-7f78f43aca35","Type":"ContainerDied","Data":"b006b0e429049c30fabb4a139e775063545ccd842780064378dd54f62d8065b3"} Nov 21 14:06:48 crc kubenswrapper[5133]: I1121 14:06:48.177538 5133 generic.go:334] "Generic (PLEG): container finished" podID="de7cad07-b950-4609-b720-18e65526b93c" containerID="a24df4d6de3085c7fae9776a6da29ea5f14bdcdd0b34744d728905f39088343c" exitCode=0 Nov 21 14:06:48 crc kubenswrapper[5133]: I1121 14:06:48.177578 5133 generic.go:334] "Generic (PLEG): container finished" podID="de7cad07-b950-4609-b720-18e65526b93c" containerID="2cb83daf26e32070f045638ac983786a894ecb9b97fa9fc1185e54f154e16d96" exitCode=2 Nov 21 14:06:48 crc kubenswrapper[5133]: I1121 14:06:48.177590 5133 generic.go:334] "Generic (PLEG): container finished" podID="de7cad07-b950-4609-b720-18e65526b93c" containerID="dd86b111685211ab293aa2262a44ab1bbc05a1539d44dc44687270f9ecd0dcac" exitCode=0 Nov 21 14:06:48 crc kubenswrapper[5133]: I1121 14:06:48.177611 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerDied","Data":"a24df4d6de3085c7fae9776a6da29ea5f14bdcdd0b34744d728905f39088343c"} Nov 21 14:06:48 crc kubenswrapper[5133]: I1121 14:06:48.177662 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerDied","Data":"2cb83daf26e32070f045638ac983786a894ecb9b97fa9fc1185e54f154e16d96"} Nov 21 14:06:48 crc kubenswrapper[5133]: I1121 14:06:48.177678 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerDied","Data":"dd86b111685211ab293aa2262a44ab1bbc05a1539d44dc44687270f9ecd0dcac"} Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.209526 5133 generic.go:334] "Generic (PLEG): container finished" podID="de7cad07-b950-4609-b720-18e65526b93c" containerID="467e748fd5f5dcd1f05f552cda96a1f1e759e9c4a404dcfa043b077c527bb7ce" exitCode=0 Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.209765 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerDied","Data":"467e748fd5f5dcd1f05f552cda96a1f1e759e9c4a404dcfa043b077c527bb7ce"} Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.585723 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.690707 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-config-data\") pod \"de7cad07-b950-4609-b720-18e65526b93c\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.690798 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-sg-core-conf-yaml\") pod \"de7cad07-b950-4609-b720-18e65526b93c\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.690857 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-combined-ca-bundle\") pod \"de7cad07-b950-4609-b720-18e65526b93c\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.690881 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-scripts\") pod \"de7cad07-b950-4609-b720-18e65526b93c\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.690945 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-ceilometer-tls-certs\") pod \"de7cad07-b950-4609-b720-18e65526b93c\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.690976 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-run-httpd\") pod \"de7cad07-b950-4609-b720-18e65526b93c\" 
(UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.691051 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-log-httpd\") pod \"de7cad07-b950-4609-b720-18e65526b93c\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.691100 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4x28\" (UniqueName: \"kubernetes.io/projected/de7cad07-b950-4609-b720-18e65526b93c-kube-api-access-j4x28\") pod \"de7cad07-b950-4609-b720-18e65526b93c\" (UID: \"de7cad07-b950-4609-b720-18e65526b93c\") " Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.691782 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "de7cad07-b950-4609-b720-18e65526b93c" (UID: "de7cad07-b950-4609-b720-18e65526b93c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.691952 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "de7cad07-b950-4609-b720-18e65526b93c" (UID: "de7cad07-b950-4609-b720-18e65526b93c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.698185 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de7cad07-b950-4609-b720-18e65526b93c-kube-api-access-j4x28" (OuterVolumeSpecName: "kube-api-access-j4x28") pod "de7cad07-b950-4609-b720-18e65526b93c" (UID: "de7cad07-b950-4609-b720-18e65526b93c"). InnerVolumeSpecName "kube-api-access-j4x28". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.698273 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-scripts" (OuterVolumeSpecName: "scripts") pod "de7cad07-b950-4609-b720-18e65526b93c" (UID: "de7cad07-b950-4609-b720-18e65526b93c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.721129 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "de7cad07-b950-4609-b720-18e65526b93c" (UID: "de7cad07-b950-4609-b720-18e65526b93c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.770256 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de7cad07-b950-4609-b720-18e65526b93c" (UID: "de7cad07-b950-4609-b720-18e65526b93c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.803938 5133 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.804024 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.804048 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.804065 5133 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.804083 5133 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/de7cad07-b950-4609-b720-18e65526b93c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.804108 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4x28\" (UniqueName: \"kubernetes.io/projected/de7cad07-b950-4609-b720-18e65526b93c-kube-api-access-j4x28\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.811772 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-config-data" (OuterVolumeSpecName: "config-data") pod "de7cad07-b950-4609-b720-18e65526b93c" (UID: "de7cad07-b950-4609-b720-18e65526b93c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.812314 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "de7cad07-b950-4609-b720-18e65526b93c" (UID: "de7cad07-b950-4609-b720-18e65526b93c"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.906255 5133 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:50 crc kubenswrapper[5133]: I1121 14:06:50.906302 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/de7cad07-b950-4609-b720-18e65526b93c-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.256685 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"de7cad07-b950-4609-b720-18e65526b93c","Type":"ContainerDied","Data":"95b80841dfc4e7272048d5fabaf5afb54835756c988c90d70b1c7699ca534498"} Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.256745 5133 scope.go:117] "RemoveContainer" containerID="a24df4d6de3085c7fae9776a6da29ea5f14bdcdd0b34744d728905f39088343c" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.256807 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.266583 5133 generic.go:334] "Generic (PLEG): container finished" podID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerID="3fb7d9bb90d270d6a8c13ea0147a9db8ad659df6f8a7fcfdd90388a0af0b1e33" exitCode=0 Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.266621 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f45677f-30ca-4b41-b51b-7f78f43aca35","Type":"ContainerDied","Data":"3fb7d9bb90d270d6a8c13ea0147a9db8ad659df6f8a7fcfdd90388a0af0b1e33"} Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.296231 5133 scope.go:117] "RemoveContainer" containerID="2cb83daf26e32070f045638ac983786a894ecb9b97fa9fc1185e54f154e16d96" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.317818 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.324628 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.330417 5133 scope.go:117] "RemoveContainer" containerID="467e748fd5f5dcd1f05f552cda96a1f1e759e9c4a404dcfa043b077c527bb7ce" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.338810 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:51 crc kubenswrapper[5133]: E1121 14:06:51.339249 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="ceilometer-notification-agent" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.339266 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="ceilometer-notification-agent" Nov 21 14:06:51 crc kubenswrapper[5133]: E1121 14:06:51.339277 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="proxy-httpd" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.339285 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="proxy-httpd" Nov 21 14:06:51 crc kubenswrapper[5133]: E1121 14:06:51.339314 5133 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="ceilometer-central-agent" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.339320 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="ceilometer-central-agent" Nov 21 14:06:51 crc kubenswrapper[5133]: E1121 14:06:51.339335 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="sg-core" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.339341 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="sg-core" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.339524 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="ceilometer-central-agent" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.339534 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="ceilometer-notification-agent" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.339548 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="sg-core" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.339561 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="de7cad07-b950-4609-b720-18e65526b93c" containerName="proxy-httpd" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.341418 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.347800 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.348188 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.348343 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.363639 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.420133 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-run-httpd\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.420199 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-config-data\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.420282 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-scripts\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.420327 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.420407 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.420427 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-log-httpd\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.420500 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thdh5\" (UniqueName: \"kubernetes.io/projected/dfdce579-9d1a-4826-9d21-bf9061ab6c01-kube-api-access-thdh5\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.420522 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.431345 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.450576 5133 scope.go:117] "RemoveContainer" containerID="dd86b111685211ab293aa2262a44ab1bbc05a1539d44dc44687270f9ecd0dcac" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.515676 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.521368 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-config-data\") pod \"4f45677f-30ca-4b41-b51b-7f78f43aca35\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.521416 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f45677f-30ca-4b41-b51b-7f78f43aca35-logs\") pod \"4f45677f-30ca-4b41-b51b-7f78f43aca35\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.521646 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbrf4\" (UniqueName: \"kubernetes.io/projected/4f45677f-30ca-4b41-b51b-7f78f43aca35-kube-api-access-qbrf4\") pod \"4f45677f-30ca-4b41-b51b-7f78f43aca35\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.521731 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-combined-ca-bundle\") pod \"4f45677f-30ca-4b41-b51b-7f78f43aca35\" (UID: \"4f45677f-30ca-4b41-b51b-7f78f43aca35\") " Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.522185 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-config-data\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.522228 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-scripts\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.522264 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.522314 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.522334 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-log-httpd\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " 
pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.522389 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thdh5\" (UniqueName: \"kubernetes.io/projected/dfdce579-9d1a-4826-9d21-bf9061ab6c01-kube-api-access-thdh5\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.522418 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.522484 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-run-httpd\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.523104 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f45677f-30ca-4b41-b51b-7f78f43aca35-logs" (OuterVolumeSpecName: "logs") pod "4f45677f-30ca-4b41-b51b-7f78f43aca35" (UID: "4f45677f-30ca-4b41-b51b-7f78f43aca35"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.523146 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-run-httpd\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.523900 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-log-httpd\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.531333 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-config-data\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.532514 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.542191 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f45677f-30ca-4b41-b51b-7f78f43aca35-kube-api-access-qbrf4" (OuterVolumeSpecName: "kube-api-access-qbrf4") pod "4f45677f-30ca-4b41-b51b-7f78f43aca35" (UID: "4f45677f-30ca-4b41-b51b-7f78f43aca35"). InnerVolumeSpecName "kube-api-access-qbrf4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.545318 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.549715 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.550550 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-scripts\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.562450 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.563452 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f45677f-30ca-4b41-b51b-7f78f43aca35" (UID: "4f45677f-30ca-4b41-b51b-7f78f43aca35"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.564867 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-config-data" (OuterVolumeSpecName: "config-data") pod "4f45677f-30ca-4b41-b51b-7f78f43aca35" (UID: "4f45677f-30ca-4b41-b51b-7f78f43aca35"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.577833 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thdh5\" (UniqueName: \"kubernetes.io/projected/dfdce579-9d1a-4826-9d21-bf9061ab6c01-kube-api-access-thdh5\") pod \"ceilometer-0\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " pod="openstack/ceilometer-0" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.625063 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbrf4\" (UniqueName: \"kubernetes.io/projected/4f45677f-30ca-4b41-b51b-7f78f43aca35-kube-api-access-qbrf4\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.625110 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.625123 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f45677f-30ca-4b41-b51b-7f78f43aca35-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.625136 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f45677f-30ca-4b41-b51b-7f78f43aca35-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:51 crc kubenswrapper[5133]: I1121 14:06:51.742620 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.239382 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:06:52 crc kubenswrapper[5133]: W1121 14:06:52.243506 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfdce579_9d1a_4826_9d21_bf9061ab6c01.slice/crio-c93cf46efdaa66a4a4fdd17f0ba941c4ad7a6b4c855bc1ab66decc4e74080ead WatchSource:0}: Error finding container c93cf46efdaa66a4a4fdd17f0ba941c4ad7a6b4c855bc1ab66decc4e74080ead: Status 404 returned error can't find the container with id c93cf46efdaa66a4a4fdd17f0ba941c4ad7a6b4c855bc1ab66decc4e74080ead Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.278817 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4f45677f-30ca-4b41-b51b-7f78f43aca35","Type":"ContainerDied","Data":"b8a5866be3fc60754ebdbc975c13156fcd27bdee98a740a43e31c3889cbb5956"} Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.278862 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.278913 5133 scope.go:117] "RemoveContainer" containerID="3fb7d9bb90d270d6a8c13ea0147a9db8ad659df6f8a7fcfdd90388a0af0b1e33" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.279885 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerStarted","Data":"c93cf46efdaa66a4a4fdd17f0ba941c4ad7a6b4c855bc1ab66decc4e74080ead"} Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.303364 5133 scope.go:117] "RemoveContainer" containerID="b006b0e429049c30fabb4a139e775063545ccd842780064378dd54f62d8065b3" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.304765 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.325959 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.334699 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.360657 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:52 crc kubenswrapper[5133]: E1121 14:06:52.361268 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-log" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.361294 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-log" Nov 21 14:06:52 crc kubenswrapper[5133]: E1121 14:06:52.361330 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-api" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.361340 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-api" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.361667 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-log" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.361705 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" containerName="nova-api-api" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.363081 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.366435 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.367386 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.367625 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.388731 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.444163 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-config-data\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.444419 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-logs\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.444536 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgxn8\" (UniqueName: \"kubernetes.io/projected/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-kube-api-access-zgxn8\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.444637 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.444666 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-public-tls-certs\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.444686 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-internal-tls-certs\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.468147 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f45677f-30ca-4b41-b51b-7f78f43aca35" path="/var/lib/kubelet/pods/4f45677f-30ca-4b41-b51b-7f78f43aca35/volumes" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.468821 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de7cad07-b950-4609-b720-18e65526b93c" path="/var/lib/kubelet/pods/de7cad07-b950-4609-b720-18e65526b93c/volumes" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.534822 5133 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-cell1-cell-mapping-mh8s9"] Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.535900 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.539984 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.540212 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.546768 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mh8s9"] Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.548040 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-config-data\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.548083 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-logs\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.548202 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgxn8\" (UniqueName: \"kubernetes.io/projected/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-kube-api-access-zgxn8\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.548253 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.548273 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-public-tls-certs\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.548294 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-internal-tls-certs\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.548774 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-logs\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.553549 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc 
kubenswrapper[5133]: I1121 14:06:52.553802 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-public-tls-certs\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.563917 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-internal-tls-certs\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.567613 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-config-data\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.580823 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgxn8\" (UniqueName: \"kubernetes.io/projected/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-kube-api-access-zgxn8\") pod \"nova-api-0\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.650566 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.650630 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-scripts\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.650728 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-config-data\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.650797 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrv66\" (UniqueName: \"kubernetes.io/projected/3618a361-7c80-4f07-9375-4753adce457f-kube-api-access-qrv66\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.682362 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.753545 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrv66\" (UniqueName: \"kubernetes.io/projected/3618a361-7c80-4f07-9375-4753adce457f-kube-api-access-qrv66\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.754311 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.754360 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-scripts\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.754470 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-config-data\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.758915 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-config-data\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.759234 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.760630 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-scripts\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.770523 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrv66\" (UniqueName: \"kubernetes.io/projected/3618a361-7c80-4f07-9375-4753adce457f-kube-api-access-qrv66\") pod \"nova-cell1-cell-mapping-mh8s9\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:52 crc kubenswrapper[5133]: I1121 14:06:52.851829 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:06:53 crc kubenswrapper[5133]: I1121 14:06:53.172208 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:06:53 crc kubenswrapper[5133]: I1121 14:06:53.289284 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6","Type":"ContainerStarted","Data":"06e26f4948913327b506106055d4bb63b48cc048b20782948266bb4bfa3e1ddb"} Nov 21 14:06:53 crc kubenswrapper[5133]: I1121 14:06:53.291872 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerStarted","Data":"d95c9f7aecd2f6f2e77f4ac97e538d655ac6060ec10fc8d6debef215c7b39153"} Nov 21 14:06:53 crc kubenswrapper[5133]: I1121 14:06:53.310248 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:06:53 crc kubenswrapper[5133]: I1121 14:06:53.310287 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:06:53 crc kubenswrapper[5133]: I1121 14:06:53.310318 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:06:53 crc kubenswrapper[5133]: I1121 14:06:53.310776 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"edc5f6668b324bafe1f33484d097fa3472daf7be070419d53434fd44d4c45cd9"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:06:53 crc kubenswrapper[5133]: I1121 14:06:53.310818 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://edc5f6668b324bafe1f33484d097fa3472daf7be070419d53434fd44d4c45cd9" gracePeriod=600 Nov 21 14:06:53 crc kubenswrapper[5133]: I1121 14:06:53.386682 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mh8s9"] Nov 21 14:06:53 crc kubenswrapper[5133]: W1121 14:06:53.392019 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3618a361_7c80_4f07_9375_4753adce457f.slice/crio-ab995e0a084d4127166b32a56ebe1793be6e24588389c6d8481995ed9c1369d4 WatchSource:0}: Error finding container ab995e0a084d4127166b32a56ebe1793be6e24588389c6d8481995ed9c1369d4: Status 404 returned error can't find the container with id ab995e0a084d4127166b32a56ebe1793be6e24588389c6d8481995ed9c1369d4 Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.311398 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="edc5f6668b324bafe1f33484d097fa3472daf7be070419d53434fd44d4c45cd9" 
exitCode=0 Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.311441 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"edc5f6668b324bafe1f33484d097fa3472daf7be070419d53434fd44d4c45cd9"} Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.312428 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77"} Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.312465 5133 scope.go:117] "RemoveContainer" containerID="5883b2ffbdc225f8fc9c34f308aadba5798cb012e313b8c25cad57a148a69dbc" Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.315211 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6","Type":"ContainerStarted","Data":"093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905"} Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.315259 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6","Type":"ContainerStarted","Data":"b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d"} Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.319237 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerStarted","Data":"e2e2f5cc83cb84ec3f7586dd5528c19097c987692643ab0877baa98e798b2010"} Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.327643 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mh8s9" event={"ID":"3618a361-7c80-4f07-9375-4753adce457f","Type":"ContainerStarted","Data":"5b2e2955233f1622eb06eaf249125bf1d84e07a6dd1d87f2418254a89bb31cb1"} Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.327702 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mh8s9" event={"ID":"3618a361-7c80-4f07-9375-4753adce457f","Type":"ContainerStarted","Data":"ab995e0a084d4127166b32a56ebe1793be6e24588389c6d8481995ed9c1369d4"} Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.368509 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.368480506 podStartE2EDuration="2.368480506s" podCreationTimestamp="2025-11-21 14:06:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:54.358581062 +0000 UTC m=+1474.156413310" watchObservedRunningTime="2025-11-21 14:06:54.368480506 +0000 UTC m=+1474.166312744" Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.378988 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-mh8s9" podStartSLOduration=2.378959366 podStartE2EDuration="2.378959366s" podCreationTimestamp="2025-11-21 14:06:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:06:54.377144758 +0000 UTC m=+1474.174977016" watchObservedRunningTime="2025-11-21 14:06:54.378959366 +0000 UTC m=+1474.176791614" Nov 21 14:06:54 crc kubenswrapper[5133]: 
I1121 14:06:54.646225 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.720227 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-9zpfq"] Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.720612 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" podUID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" containerName="dnsmasq-dns" containerID="cri-o://ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70" gracePeriod=10 Nov 21 14:06:54 crc kubenswrapper[5133]: I1121 14:06:54.862876 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" podUID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: connect: connection refused" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.256093 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.325067 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xh59\" (UniqueName: \"kubernetes.io/projected/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-kube-api-access-8xh59\") pod \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.325280 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-dns-svc\") pod \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.325412 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-sb\") pod \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.325521 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-config\") pod \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.325612 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-nb\") pod \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\" (UID: \"d4eed958-bbdc-4a15-a42a-dfcfadd80a76\") " Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.335225 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-kube-api-access-8xh59" (OuterVolumeSpecName: "kube-api-access-8xh59") pod "d4eed958-bbdc-4a15-a42a-dfcfadd80a76" (UID: "d4eed958-bbdc-4a15-a42a-dfcfadd80a76"). InnerVolumeSpecName "kube-api-access-8xh59". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.366789 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerStarted","Data":"3233b4e6edd00394373bbcbbe808a291cdd2e5077ae6b17bc26bed14d1a8616c"} Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.368738 5133 generic.go:334] "Generic (PLEG): container finished" podID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" containerID="ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70" exitCode=0 Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.369352 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.369377 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" event={"ID":"d4eed958-bbdc-4a15-a42a-dfcfadd80a76","Type":"ContainerDied","Data":"ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70"} Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.370147 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b8cf6657-9zpfq" event={"ID":"d4eed958-bbdc-4a15-a42a-dfcfadd80a76","Type":"ContainerDied","Data":"ffad53d872dd2837941b263dca7f4a9a1e8538a87ce006b604fb97b0fbc35e0f"} Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.370186 5133 scope.go:117] "RemoveContainer" containerID="ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.394043 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d4eed958-bbdc-4a15-a42a-dfcfadd80a76" (UID: "d4eed958-bbdc-4a15-a42a-dfcfadd80a76"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.399260 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-config" (OuterVolumeSpecName: "config") pod "d4eed958-bbdc-4a15-a42a-dfcfadd80a76" (UID: "d4eed958-bbdc-4a15-a42a-dfcfadd80a76"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.410324 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d4eed958-bbdc-4a15-a42a-dfcfadd80a76" (UID: "d4eed958-bbdc-4a15-a42a-dfcfadd80a76"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.430373 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.430455 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.430471 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.430486 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xh59\" (UniqueName: \"kubernetes.io/projected/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-kube-api-access-8xh59\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.449097 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d4eed958-bbdc-4a15-a42a-dfcfadd80a76" (UID: "d4eed958-bbdc-4a15-a42a-dfcfadd80a76"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.502753 5133 scope.go:117] "RemoveContainer" containerID="ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.524009 5133 scope.go:117] "RemoveContainer" containerID="ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70" Nov 21 14:06:55 crc kubenswrapper[5133]: E1121 14:06:55.524647 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70\": container with ID starting with ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70 not found: ID does not exist" containerID="ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.524724 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70"} err="failed to get container status \"ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70\": rpc error: code = NotFound desc = could not find container \"ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70\": container with ID starting with ed9f187f7564012737f4b3c1a46af4c23ab0a3588e58b5e30460eca35827bf70 not found: ID does not exist" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.524780 5133 scope.go:117] "RemoveContainer" containerID="ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5" Nov 21 14:06:55 crc kubenswrapper[5133]: E1121 14:06:55.525205 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5\": container with ID starting with ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5 not found: ID does not exist" 
containerID="ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.525244 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5"} err="failed to get container status \"ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5\": rpc error: code = NotFound desc = could not find container \"ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5\": container with ID starting with ee08a7c502b0b483d220726f2355a1ae53f9c18843dcb5555d54c2543cc9afd5 not found: ID does not exist" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.534011 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4eed958-bbdc-4a15-a42a-dfcfadd80a76-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.699079 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-9zpfq"] Nov 21 14:06:55 crc kubenswrapper[5133]: I1121 14:06:55.708099 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b8cf6657-9zpfq"] Nov 21 14:06:56 crc kubenswrapper[5133]: I1121 14:06:56.389318 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerStarted","Data":"6850ac4ba9ac559104d16b42b27615df5159483ea6e1c699bc033aec509a7794"} Nov 21 14:06:56 crc kubenswrapper[5133]: I1121 14:06:56.391325 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 14:06:56 crc kubenswrapper[5133]: I1121 14:06:56.424247 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.898383468 podStartE2EDuration="5.424229224s" podCreationTimestamp="2025-11-21 14:06:51 +0000 UTC" firstStartedPulling="2025-11-21 14:06:52.250584368 +0000 UTC m=+1472.048416616" lastFinishedPulling="2025-11-21 14:06:55.776430124 +0000 UTC m=+1475.574262372" observedRunningTime="2025-11-21 14:06:56.418657095 +0000 UTC m=+1476.216489353" watchObservedRunningTime="2025-11-21 14:06:56.424229224 +0000 UTC m=+1476.222061462" Nov 21 14:06:56 crc kubenswrapper[5133]: I1121 14:06:56.470713 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" path="/var/lib/kubelet/pods/d4eed958-bbdc-4a15-a42a-dfcfadd80a76/volumes" Nov 21 14:06:58 crc kubenswrapper[5133]: I1121 14:06:58.799950 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/redhat-marketplace-jtgkd" podUID="3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4" containerName="registry-server" probeResult="failure" output=< Nov 21 14:06:58 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 14:06:58 crc kubenswrapper[5133]: > Nov 21 14:06:58 crc kubenswrapper[5133]: I1121 14:06:58.809039 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-jtgkd" podUID="3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4" containerName="registry-server" probeResult="failure" output=< Nov 21 14:06:58 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 14:06:58 crc kubenswrapper[5133]: > Nov 21 14:06:59 crc kubenswrapper[5133]: I1121 14:06:59.964066 5133 generic.go:334] "Generic (PLEG): container finished" 
podID="3618a361-7c80-4f07-9375-4753adce457f" containerID="5b2e2955233f1622eb06eaf249125bf1d84e07a6dd1d87f2418254a89bb31cb1" exitCode=0 Nov 21 14:06:59 crc kubenswrapper[5133]: I1121 14:06:59.964626 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mh8s9" event={"ID":"3618a361-7c80-4f07-9375-4753adce457f","Type":"ContainerDied","Data":"5b2e2955233f1622eb06eaf249125bf1d84e07a6dd1d87f2418254a89bb31cb1"} Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.398165 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.487489 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-config-data\") pod \"3618a361-7c80-4f07-9375-4753adce457f\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.487654 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-combined-ca-bundle\") pod \"3618a361-7c80-4f07-9375-4753adce457f\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.487876 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-scripts\") pod \"3618a361-7c80-4f07-9375-4753adce457f\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.487959 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrv66\" (UniqueName: \"kubernetes.io/projected/3618a361-7c80-4f07-9375-4753adce457f-kube-api-access-qrv66\") pod \"3618a361-7c80-4f07-9375-4753adce457f\" (UID: \"3618a361-7c80-4f07-9375-4753adce457f\") " Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.493983 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-scripts" (OuterVolumeSpecName: "scripts") pod "3618a361-7c80-4f07-9375-4753adce457f" (UID: "3618a361-7c80-4f07-9375-4753adce457f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.495480 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3618a361-7c80-4f07-9375-4753adce457f-kube-api-access-qrv66" (OuterVolumeSpecName: "kube-api-access-qrv66") pod "3618a361-7c80-4f07-9375-4753adce457f" (UID: "3618a361-7c80-4f07-9375-4753adce457f"). InnerVolumeSpecName "kube-api-access-qrv66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.515897 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3618a361-7c80-4f07-9375-4753adce457f" (UID: "3618a361-7c80-4f07-9375-4753adce457f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.516378 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-config-data" (OuterVolumeSpecName: "config-data") pod "3618a361-7c80-4f07-9375-4753adce457f" (UID: "3618a361-7c80-4f07-9375-4753adce457f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.590588 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.590631 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.590647 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3618a361-7c80-4f07-9375-4753adce457f-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.590659 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qrv66\" (UniqueName: \"kubernetes.io/projected/3618a361-7c80-4f07-9375-4753adce457f-kube-api-access-qrv66\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.992049 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mh8s9" event={"ID":"3618a361-7c80-4f07-9375-4753adce457f","Type":"ContainerDied","Data":"ab995e0a084d4127166b32a56ebe1793be6e24588389c6d8481995ed9c1369d4"} Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.992602 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab995e0a084d4127166b32a56ebe1793be6e24588389c6d8481995ed9c1369d4" Nov 21 14:07:01 crc kubenswrapper[5133]: I1121 14:07:01.992174 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mh8s9" Nov 21 14:07:02 crc kubenswrapper[5133]: I1121 14:07:02.196100 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:07:02 crc kubenswrapper[5133]: I1121 14:07:02.196381 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="ed15e88e-868f-4a35-9966-6d486325db91" containerName="nova-scheduler-scheduler" containerID="cri-o://f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585" gracePeriod=30 Nov 21 14:07:02 crc kubenswrapper[5133]: I1121 14:07:02.218075 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:07:02 crc kubenswrapper[5133]: I1121 14:07:02.218323 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerName="nova-api-log" containerID="cri-o://b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d" gracePeriod=30 Nov 21 14:07:02 crc kubenswrapper[5133]: I1121 14:07:02.218459 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerName="nova-api-api" containerID="cri-o://093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905" gracePeriod=30 Nov 21 14:07:02 crc kubenswrapper[5133]: I1121 14:07:02.255652 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:07:02 crc kubenswrapper[5133]: I1121 14:07:02.255887 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-log" containerID="cri-o://4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560" gracePeriod=30 Nov 21 14:07:02 crc kubenswrapper[5133]: I1121 14:07:02.255942 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-metadata" containerID="cri-o://c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92" gracePeriod=30 Nov 21 14:07:02 crc kubenswrapper[5133]: I1121 14:07:02.841775 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.004075 5133 generic.go:334] "Generic (PLEG): container finished" podID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerID="093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905" exitCode=0 Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.004121 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.004127 5133 generic.go:334] "Generic (PLEG): container finished" podID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerID="b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d" exitCode=143 Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.004155 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6","Type":"ContainerDied","Data":"093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905"} Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.004217 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6","Type":"ContainerDied","Data":"b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d"} Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.004234 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6","Type":"ContainerDied","Data":"06e26f4948913327b506106055d4bb63b48cc048b20782948266bb4bfa3e1ddb"} Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.004256 5133 scope.go:117] "RemoveContainer" containerID="093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.006673 5133 generic.go:334] "Generic (PLEG): container finished" podID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerID="4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560" exitCode=143 Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.006710 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20e07bbe-ddbc-445a-8201-2aa590f4f1a3","Type":"ContainerDied","Data":"4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560"} Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.023354 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-combined-ca-bundle\") pod \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.023425 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-config-data\") pod \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.023448 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-public-tls-certs\") pod \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.023501 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-internal-tls-certs\") pod \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.023578 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-logs\") pod \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.023666 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgxn8\" (UniqueName: \"kubernetes.io/projected/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-kube-api-access-zgxn8\") pod \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\" (UID: \"66a9911f-e3b9-4402-ab01-a5cf1c1c70a6\") " Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.024078 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-logs" (OuterVolumeSpecName: "logs") pod "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" (UID: "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.024504 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.031285 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-kube-api-access-zgxn8" (OuterVolumeSpecName: "kube-api-access-zgxn8") pod "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" (UID: "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6"). InnerVolumeSpecName "kube-api-access-zgxn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.035141 5133 scope.go:117] "RemoveContainer" containerID="b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.052398 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-config-data" (OuterVolumeSpecName: "config-data") pod "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" (UID: "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.053737 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" (UID: "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.058751 5133 scope.go:117] "RemoveContainer" containerID="093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905" Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.059191 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905\": container with ID starting with 093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905 not found: ID does not exist" containerID="093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.059241 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905"} err="failed to get container status \"093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905\": rpc error: code = NotFound desc = could not find container \"093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905\": container with ID starting with 093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905 not found: ID does not exist" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.059295 5133 scope.go:117] "RemoveContainer" containerID="b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d" Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.059608 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d\": container with ID starting with b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d not found: ID does not exist" containerID="b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.059642 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d"} err="failed to get container status \"b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d\": rpc error: code = NotFound desc = could not find container \"b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d\": container with ID starting with b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d not found: ID does not exist" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.059719 5133 scope.go:117] "RemoveContainer" containerID="093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.060312 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905"} err="failed to get container status \"093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905\": rpc error: code = NotFound desc = could not find container \"093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905\": container with ID starting with 093548322c6629012b7f0b25202782dbdf6395021a23d3ad42f8fbe946741905 not found: ID does not exist" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.060331 5133 scope.go:117] "RemoveContainer" containerID="b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.060575 5133 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d"} err="failed to get container status \"b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d\": rpc error: code = NotFound desc = could not find container \"b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d\": container with ID starting with b4a835f8b9fd70e6a3450a2c43a19d16dc6002cee6ba25d42955173b2c0f002d not found: ID does not exist" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.081173 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" (UID: "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.084658 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" (UID: "66a9911f-e3b9-4402-ab01-a5cf1c1c70a6"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.126065 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgxn8\" (UniqueName: \"kubernetes.io/projected/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-kube-api-access-zgxn8\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.126124 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.126134 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.126143 5133 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.126154 5133 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.200068 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.202277 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] 
Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.203745 5133 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.203786 5133 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="ed15e88e-868f-4a35-9966-6d486325db91" containerName="nova-scheduler-scheduler" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.354226 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.364942 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.382905 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.383476 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3618a361-7c80-4f07-9375-4753adce457f" containerName="nova-manage" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.383502 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="3618a361-7c80-4f07-9375-4753adce457f" containerName="nova-manage" Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.383518 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerName="nova-api-api" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.383529 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerName="nova-api-api" Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.383554 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" containerName="init" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.383566 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" containerName="init" Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.383592 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerName="nova-api-log" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.383603 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerName="nova-api-log" Nov 21 14:07:03 crc kubenswrapper[5133]: E1121 14:07:03.383628 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" containerName="dnsmasq-dns" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.383643 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" containerName="dnsmasq-dns" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.383983 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerName="nova-api-log" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.384040 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="3618a361-7c80-4f07-9375-4753adce457f" containerName="nova-manage" Nov 21 
14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.384084 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" containerName="nova-api-api" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.384100 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4eed958-bbdc-4a15-a42a-dfcfadd80a76" containerName="dnsmasq-dns" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.385932 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.389847 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.390463 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.390508 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.396427 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.535059 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.535136 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7gkc\" (UniqueName: \"kubernetes.io/projected/22807ad8-e61a-4708-8dda-5863f054c746-kube-api-access-f7gkc\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.535211 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-public-tls-certs\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.535264 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-config-data\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.535368 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22807ad8-e61a-4708-8dda-5863f054c746-logs\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.535416 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-internal-tls-certs\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.636763 5133 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22807ad8-e61a-4708-8dda-5863f054c746-logs\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.636816 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-internal-tls-certs\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.636976 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.637020 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7gkc\" (UniqueName: \"kubernetes.io/projected/22807ad8-e61a-4708-8dda-5863f054c746-kube-api-access-f7gkc\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.637063 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-public-tls-certs\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.637287 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-config-data\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.637418 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/22807ad8-e61a-4708-8dda-5863f054c746-logs\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.641550 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-internal-tls-certs\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.641796 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-config-data\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.642952 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-public-tls-certs\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.647228 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/22807ad8-e61a-4708-8dda-5863f054c746-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.684502 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7gkc\" (UniqueName: \"kubernetes.io/projected/22807ad8-e61a-4708-8dda-5863f054c746-kube-api-access-f7gkc\") pod \"nova-api-0\" (UID: \"22807ad8-e61a-4708-8dda-5863f054c746\") " pod="openstack/nova-api-0" Nov 21 14:07:03 crc kubenswrapper[5133]: I1121 14:07:03.751618 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 21 14:07:04 crc kubenswrapper[5133]: I1121 14:07:04.216826 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 21 14:07:04 crc kubenswrapper[5133]: W1121 14:07:04.224046 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod22807ad8_e61a_4708_8dda_5863f054c746.slice/crio-505e4cdb5eccd95d72be910ddad35e76a03f9ffd04af18c306e8d58c6b6fd8b3 WatchSource:0}: Error finding container 505e4cdb5eccd95d72be910ddad35e76a03f9ffd04af18c306e8d58c6b6fd8b3: Status 404 returned error can't find the container with id 505e4cdb5eccd95d72be910ddad35e76a03f9ffd04af18c306e8d58c6b6fd8b3 Nov 21 14:07:04 crc kubenswrapper[5133]: I1121 14:07:04.471480 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66a9911f-e3b9-4402-ab01-a5cf1c1c70a6" path="/var/lib/kubelet/pods/66a9911f-e3b9-4402-ab01-a5cf1c1c70a6/volumes" Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.031760 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"22807ad8-e61a-4708-8dda-5863f054c746","Type":"ContainerStarted","Data":"3a9c88ab1aa148c5113826b2467eceb148d74249fcfa8f2ded6de4b28a1f7c13"} Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.032275 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"22807ad8-e61a-4708-8dda-5863f054c746","Type":"ContainerStarted","Data":"ccb91c7ee507d2ef7a62f51a5e8778fadb0f7f49d2a00f3effbd01b5787a968e"} Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.032294 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"22807ad8-e61a-4708-8dda-5863f054c746","Type":"ContainerStarted","Data":"505e4cdb5eccd95d72be910ddad35e76a03f9ffd04af18c306e8d58c6b6fd8b3"} Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.068278 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.068259689 podStartE2EDuration="2.068259689s" podCreationTimestamp="2025-11-21 14:07:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:07:05.061272462 +0000 UTC m=+1484.859104710" watchObservedRunningTime="2025-11-21 14:07:05.068259689 +0000 UTC m=+1484.866091937" Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.396156 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.172:8775/\": read tcp 10.217.0.2:46710->10.217.0.172:8775: read: connection reset by peer" Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.397243 5133 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.172:8775/\": read tcp 10.217.0.2:46720->10.217.0.172:8775: read: connection reset by peer" Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.840009 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.987785 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-combined-ca-bundle\") pod \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.987866 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-logs\") pod \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.987955 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-nova-metadata-tls-certs\") pod \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.988036 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkrbz\" (UniqueName: \"kubernetes.io/projected/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-kube-api-access-jkrbz\") pod \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.988061 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-config-data\") pod \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\" (UID: \"20e07bbe-ddbc-445a-8201-2aa590f4f1a3\") " Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.988793 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-logs" (OuterVolumeSpecName: "logs") pod "20e07bbe-ddbc-445a-8201-2aa590f4f1a3" (UID: "20e07bbe-ddbc-445a-8201-2aa590f4f1a3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:07:05 crc kubenswrapper[5133]: I1121 14:07:05.994816 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-kube-api-access-jkrbz" (OuterVolumeSpecName: "kube-api-access-jkrbz") pod "20e07bbe-ddbc-445a-8201-2aa590f4f1a3" (UID: "20e07bbe-ddbc-445a-8201-2aa590f4f1a3"). InnerVolumeSpecName "kube-api-access-jkrbz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.018477 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-config-data" (OuterVolumeSpecName: "config-data") pod "20e07bbe-ddbc-445a-8201-2aa590f4f1a3" (UID: "20e07bbe-ddbc-445a-8201-2aa590f4f1a3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.024463 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "20e07bbe-ddbc-445a-8201-2aa590f4f1a3" (UID: "20e07bbe-ddbc-445a-8201-2aa590f4f1a3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.040876 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "20e07bbe-ddbc-445a-8201-2aa590f4f1a3" (UID: "20e07bbe-ddbc-445a-8201-2aa590f4f1a3"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.041571 5133 generic.go:334] "Generic (PLEG): container finished" podID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerID="c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92" exitCode=0 Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.041662 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20e07bbe-ddbc-445a-8201-2aa590f4f1a3","Type":"ContainerDied","Data":"c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92"} Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.041706 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.041734 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"20e07bbe-ddbc-445a-8201-2aa590f4f1a3","Type":"ContainerDied","Data":"1a876b66b29c738220b932b5010dfae60f2dab1900a3557fc7da509f3c52c157"} Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.041757 5133 scope.go:117] "RemoveContainer" containerID="c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.090181 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkrbz\" (UniqueName: \"kubernetes.io/projected/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-kube-api-access-jkrbz\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.090210 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.090220 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.090231 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.090274 5133 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/20e07bbe-ddbc-445a-8201-2aa590f4f1a3-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:06 crc 
kubenswrapper[5133]: I1121 14:07:06.118285 5133 scope.go:117] "RemoveContainer" containerID="4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.128243 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.140493 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.148292 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.148338 5133 scope.go:117] "RemoveContainer" containerID="c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92" Nov 21 14:07:06 crc kubenswrapper[5133]: E1121 14:07:06.148643 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-metadata" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.148659 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-metadata" Nov 21 14:07:06 crc kubenswrapper[5133]: E1121 14:07:06.148677 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-log" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.148683 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-log" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.148873 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-log" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.148894 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" containerName="nova-metadata-metadata" Nov 21 14:07:06 crc kubenswrapper[5133]: E1121 14:07:06.149066 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92\": container with ID starting with c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92 not found: ID does not exist" containerID="c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.149115 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92"} err="failed to get container status \"c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92\": rpc error: code = NotFound desc = could not find container \"c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92\": container with ID starting with c5a0caca50a2fd71756d9ecfee5e5d6b206fa80f2755bc92cd4696e86b873f92 not found: ID does not exist" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.149146 5133 scope.go:117] "RemoveContainer" containerID="4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.149757 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: E1121 14:07:06.150391 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560\": container with ID starting with 4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560 not found: ID does not exist" containerID="4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.150488 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560"} err="failed to get container status \"4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560\": rpc error: code = NotFound desc = could not find container \"4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560\": container with ID starting with 4a9eaf07ead25cdf6dea3d7f7ae153a3fd284f61f07f5e5cd31f2432bfaa2560 not found: ID does not exist" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.152060 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.152603 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.158930 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.294898 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cda96e6d-092b-4f4a-8b74-e2c9771825f6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.295029 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cda96e6d-092b-4f4a-8b74-e2c9771825f6-config-data\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.295074 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cda96e6d-092b-4f4a-8b74-e2c9771825f6-logs\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.295130 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rndmp\" (UniqueName: \"kubernetes.io/projected/cda96e6d-092b-4f4a-8b74-e2c9771825f6-kube-api-access-rndmp\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.295177 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda96e6d-092b-4f4a-8b74-e2c9771825f6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc 
kubenswrapper[5133]: I1121 14:07:06.396472 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cda96e6d-092b-4f4a-8b74-e2c9771825f6-logs\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.396543 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rndmp\" (UniqueName: \"kubernetes.io/projected/cda96e6d-092b-4f4a-8b74-e2c9771825f6-kube-api-access-rndmp\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.396600 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda96e6d-092b-4f4a-8b74-e2c9771825f6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.396683 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cda96e6d-092b-4f4a-8b74-e2c9771825f6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.396747 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cda96e6d-092b-4f4a-8b74-e2c9771825f6-config-data\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.397110 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cda96e6d-092b-4f4a-8b74-e2c9771825f6-logs\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.400822 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/cda96e6d-092b-4f4a-8b74-e2c9771825f6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.400840 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cda96e6d-092b-4f4a-8b74-e2c9771825f6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.401384 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cda96e6d-092b-4f4a-8b74-e2c9771825f6-config-data\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.417162 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rndmp\" (UniqueName: \"kubernetes.io/projected/cda96e6d-092b-4f4a-8b74-e2c9771825f6-kube-api-access-rndmp\") pod \"nova-metadata-0\" (UID: \"cda96e6d-092b-4f4a-8b74-e2c9771825f6\") " 
pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.468116 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20e07bbe-ddbc-445a-8201-2aa590f4f1a3" path="/var/lib/kubelet/pods/20e07bbe-ddbc-445a-8201-2aa590f4f1a3/volumes" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.476164 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 21 14:07:06 crc kubenswrapper[5133]: I1121 14:07:06.981536 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 21 14:07:06 crc kubenswrapper[5133]: W1121 14:07:06.988766 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcda96e6d_092b_4f4a_8b74_e2c9771825f6.slice/crio-4bb8cc63221dbe120aba458279a52770da053c259a2bfb0d33d3ca9176362290 WatchSource:0}: Error finding container 4bb8cc63221dbe120aba458279a52770da053c259a2bfb0d33d3ca9176362290: Status 404 returned error can't find the container with id 4bb8cc63221dbe120aba458279a52770da053c259a2bfb0d33d3ca9176362290 Nov 21 14:07:07 crc kubenswrapper[5133]: I1121 14:07:07.056967 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"cda96e6d-092b-4f4a-8b74-e2c9771825f6","Type":"ContainerStarted","Data":"4bb8cc63221dbe120aba458279a52770da053c259a2bfb0d33d3ca9176362290"} Nov 21 14:07:07 crc kubenswrapper[5133]: I1121 14:07:07.974864 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.070860 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"cda96e6d-092b-4f4a-8b74-e2c9771825f6","Type":"ContainerStarted","Data":"f1a2e8ec1703871baf88f3a54d464f548fb62f436399339bf7b80db22bc5cd53"} Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.070905 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"cda96e6d-092b-4f4a-8b74-e2c9771825f6","Type":"ContainerStarted","Data":"39862f91640b568f53a85e6c4b09153ab7e1d7c894da39c13c9997cb4c74a0ab"} Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.073456 5133 generic.go:334] "Generic (PLEG): container finished" podID="ed15e88e-868f-4a35-9966-6d486325db91" containerID="f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585" exitCode=0 Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.073519 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ed15e88e-868f-4a35-9966-6d486325db91","Type":"ContainerDied","Data":"f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585"} Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.073548 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ed15e88e-868f-4a35-9966-6d486325db91","Type":"ContainerDied","Data":"d47d79ccad21ed3f086e9cec925782891cad7a0b50f3c3ea5f72816db7126d12"} Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.073591 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.073807 5133 scope.go:117] "RemoveContainer" containerID="f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.095773 5133 scope.go:117] "RemoveContainer" containerID="f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.096121 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.096101016 podStartE2EDuration="2.096101016s" podCreationTimestamp="2025-11-21 14:07:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:07:08.091167694 +0000 UTC m=+1487.888999942" watchObservedRunningTime="2025-11-21 14:07:08.096101016 +0000 UTC m=+1487.893933264" Nov 21 14:07:08 crc kubenswrapper[5133]: E1121 14:07:08.096379 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585\": container with ID starting with f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585 not found: ID does not exist" containerID="f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.096459 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585"} err="failed to get container status \"f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585\": rpc error: code = NotFound desc = could not find container \"f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585\": container with ID starting with f80dfaf053872459d558aac0e0782cd60f18e1649f9aec950a194c37ee3aa585 not found: ID does not exist" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.129238 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-combined-ca-bundle\") pod \"ed15e88e-868f-4a35-9966-6d486325db91\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.129490 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-config-data\") pod \"ed15e88e-868f-4a35-9966-6d486325db91\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.129522 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xjmf\" (UniqueName: \"kubernetes.io/projected/ed15e88e-868f-4a35-9966-6d486325db91-kube-api-access-2xjmf\") pod \"ed15e88e-868f-4a35-9966-6d486325db91\" (UID: \"ed15e88e-868f-4a35-9966-6d486325db91\") " Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.137288 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed15e88e-868f-4a35-9966-6d486325db91-kube-api-access-2xjmf" (OuterVolumeSpecName: "kube-api-access-2xjmf") pod "ed15e88e-868f-4a35-9966-6d486325db91" (UID: "ed15e88e-868f-4a35-9966-6d486325db91"). InnerVolumeSpecName "kube-api-access-2xjmf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.160266 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed15e88e-868f-4a35-9966-6d486325db91" (UID: "ed15e88e-868f-4a35-9966-6d486325db91"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.168291 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-config-data" (OuterVolumeSpecName: "config-data") pod "ed15e88e-868f-4a35-9966-6d486325db91" (UID: "ed15e88e-868f-4a35-9966-6d486325db91"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.234705 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.235309 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xjmf\" (UniqueName: \"kubernetes.io/projected/ed15e88e-868f-4a35-9966-6d486325db91-kube-api-access-2xjmf\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.235377 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed15e88e-868f-4a35-9966-6d486325db91-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.407774 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.430269 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.449840 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:07:08 crc kubenswrapper[5133]: E1121 14:07:08.450520 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed15e88e-868f-4a35-9966-6d486325db91" containerName="nova-scheduler-scheduler" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.450636 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed15e88e-868f-4a35-9966-6d486325db91" containerName="nova-scheduler-scheduler" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.450904 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed15e88e-868f-4a35-9966-6d486325db91" containerName="nova-scheduler-scheduler" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.451717 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.455394 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.473668 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed15e88e-868f-4a35-9966-6d486325db91" path="/var/lib/kubelet/pods/ed15e88e-868f-4a35-9966-6d486325db91/volumes" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.474886 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.541263 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8-config-data\") pod \"nova-scheduler-0\" (UID: \"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.541364 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnbdb\" (UniqueName: \"kubernetes.io/projected/591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8-kube-api-access-fnbdb\") pod \"nova-scheduler-0\" (UID: \"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.541605 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.643571 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.643742 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8-config-data\") pod \"nova-scheduler-0\" (UID: \"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.643808 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnbdb\" (UniqueName: \"kubernetes.io/projected/591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8-kube-api-access-fnbdb\") pod \"nova-scheduler-0\" (UID: \"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.648863 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8-config-data\") pod \"nova-scheduler-0\" (UID: \"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.652573 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: 
\"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.662712 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnbdb\" (UniqueName: \"kubernetes.io/projected/591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8-kube-api-access-fnbdb\") pod \"nova-scheduler-0\" (UID: \"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8\") " pod="openstack/nova-scheduler-0" Nov 21 14:07:08 crc kubenswrapper[5133]: I1121 14:07:08.771325 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 21 14:07:09 crc kubenswrapper[5133]: I1121 14:07:09.306208 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 21 14:07:09 crc kubenswrapper[5133]: W1121 14:07:09.307636 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod591d47e3_df0d_4bc0_bc6a_c51c1f40c8a8.slice/crio-4486f2b03f049e629392ce1682f28f378a69b54674abcbad891664d127375536 WatchSource:0}: Error finding container 4486f2b03f049e629392ce1682f28f378a69b54674abcbad891664d127375536: Status 404 returned error can't find the container with id 4486f2b03f049e629392ce1682f28f378a69b54674abcbad891664d127375536 Nov 21 14:07:10 crc kubenswrapper[5133]: I1121 14:07:10.098497 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8","Type":"ContainerStarted","Data":"90e34f4e1f7528f8a30cccca6955ad10ac71d3eda28c1c12b039c7267ac3eccb"} Nov 21 14:07:10 crc kubenswrapper[5133]: I1121 14:07:10.098803 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8","Type":"ContainerStarted","Data":"4486f2b03f049e629392ce1682f28f378a69b54674abcbad891664d127375536"} Nov 21 14:07:10 crc kubenswrapper[5133]: I1121 14:07:10.121910 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.121881243 podStartE2EDuration="2.121881243s" podCreationTimestamp="2025-11-21 14:07:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:07:10.115673037 +0000 UTC m=+1489.913505325" watchObservedRunningTime="2025-11-21 14:07:10.121881243 +0000 UTC m=+1489.919713511" Nov 21 14:07:11 crc kubenswrapper[5133]: I1121 14:07:11.477254 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 14:07:11 crc kubenswrapper[5133]: I1121 14:07:11.477769 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 21 14:07:13 crc kubenswrapper[5133]: I1121 14:07:13.752430 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 14:07:13 crc kubenswrapper[5133]: I1121 14:07:13.752882 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 21 14:07:13 crc kubenswrapper[5133]: I1121 14:07:13.772360 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 21 14:07:14 crc kubenswrapper[5133]: I1121 14:07:14.766337 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="22807ad8-e61a-4708-8dda-5863f054c746" containerName="nova-api-api" probeResult="failure" 
output="Get \"https://10.217.0.184:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 14:07:14 crc kubenswrapper[5133]: I1121 14:07:14.766372 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="22807ad8-e61a-4708-8dda-5863f054c746" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.184:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 14:07:16 crc kubenswrapper[5133]: I1121 14:07:16.476803 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 14:07:16 crc kubenswrapper[5133]: I1121 14:07:16.477374 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 21 14:07:17 crc kubenswrapper[5133]: I1121 14:07:17.491309 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="cda96e6d-092b-4f4a-8b74-e2c9771825f6" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.185:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 14:07:17 crc kubenswrapper[5133]: I1121 14:07:17.491401 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="cda96e6d-092b-4f4a-8b74-e2c9771825f6" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.185:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 14:07:18 crc kubenswrapper[5133]: I1121 14:07:18.772283 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 21 14:07:18 crc kubenswrapper[5133]: I1121 14:07:18.798469 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 21 14:07:19 crc kubenswrapper[5133]: I1121 14:07:19.247037 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 21 14:07:21 crc kubenswrapper[5133]: I1121 14:07:21.756724 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 21 14:07:23 crc kubenswrapper[5133]: I1121 14:07:23.759885 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 14:07:23 crc kubenswrapper[5133]: I1121 14:07:23.760588 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 14:07:23 crc kubenswrapper[5133]: I1121 14:07:23.762441 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 21 14:07:23 crc kubenswrapper[5133]: I1121 14:07:23.767609 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 14:07:24 crc kubenswrapper[5133]: I1121 14:07:24.266596 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 21 14:07:24 crc kubenswrapper[5133]: I1121 14:07:24.280247 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 21 14:07:26 crc kubenswrapper[5133]: I1121 14:07:26.485884 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 21 14:07:26 crc kubenswrapper[5133]: I1121 14:07:26.487504 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/nova-metadata-0" Nov 21 14:07:26 crc kubenswrapper[5133]: I1121 14:07:26.493908 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 14:07:27 crc kubenswrapper[5133]: I1121 14:07:27.313553 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 21 14:07:35 crc kubenswrapper[5133]: I1121 14:07:35.650775 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 14:07:36 crc kubenswrapper[5133]: I1121 14:07:36.745495 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 14:07:40 crc kubenswrapper[5133]: I1121 14:07:40.112562 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="9aa1caed-f687-4526-a851-59b4d192b705" containerName="rabbitmq" containerID="cri-o://bd3fe1e8202ee70b5ddcd1ed9c071a0cb93f6beae9a86622fc0017a4dd4a1716" gracePeriod=604796 Nov 21 14:07:40 crc kubenswrapper[5133]: I1121 14:07:40.173812 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="9aa1caed-f687-4526-a851-59b4d192b705" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Nov 21 14:07:41 crc kubenswrapper[5133]: I1121 14:07:41.291341 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="2c9873f2-025e-499f-8a76-47c38495fd75" containerName="rabbitmq" containerID="cri-o://e453506f9e3a42b8e238b5fe885a82325983fce8b3d35ff909c123419ee46015" gracePeriod=604796 Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.554485 5133 generic.go:334] "Generic (PLEG): container finished" podID="9aa1caed-f687-4526-a851-59b4d192b705" containerID="bd3fe1e8202ee70b5ddcd1ed9c071a0cb93f6beae9a86622fc0017a4dd4a1716" exitCode=0 Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.554702 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9aa1caed-f687-4526-a851-59b4d192b705","Type":"ContainerDied","Data":"bd3fe1e8202ee70b5ddcd1ed9c071a0cb93f6beae9a86622fc0017a4dd4a1716"} Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.761386 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.875699 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-server-conf\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.875768 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-plugins-conf\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.875819 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9aa1caed-f687-4526-a851-59b4d192b705-pod-info\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.875843 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-plugins\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.875867 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-confd\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.875888 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9aa1caed-f687-4526-a851-59b4d192b705-erlang-cookie-secret\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.875909 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-config-data\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.875965 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-tls\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.876033 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22d9n\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-kube-api-access-22d9n\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.876070 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: 
\"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.876123 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-erlang-cookie\") pod \"9aa1caed-f687-4526-a851-59b4d192b705\" (UID: \"9aa1caed-f687-4526-a851-59b4d192b705\") " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.878149 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.878246 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.878527 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.883349 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.884492 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/9aa1caed-f687-4526-a851-59b4d192b705-pod-info" (OuterVolumeSpecName: "pod-info") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.889545 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.896596 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-kube-api-access-22d9n" (OuterVolumeSpecName: "kube-api-access-22d9n") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "kube-api-access-22d9n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.906526 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9aa1caed-f687-4526-a851-59b4d192b705-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.928252 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-config-data" (OuterVolumeSpecName: "config-data") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.932126 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-server-conf" (OuterVolumeSpecName: "server-conf") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.978390 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22d9n\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-kube-api-access-22d9n\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.978614 5133 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.978689 5133 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.978743 5133 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-server-conf\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.978803 5133 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.978860 5133 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9aa1caed-f687-4526-a851-59b4d192b705-pod-info\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.978920 5133 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.978972 5133 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9aa1caed-f687-4526-a851-59b4d192b705-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:46 crc 
kubenswrapper[5133]: I1121 14:07:46.980206 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9aa1caed-f687-4526-a851-59b4d192b705-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:46 crc kubenswrapper[5133]: I1121 14:07:46.980251 5133 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.007342 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "9aa1caed-f687-4526-a851-59b4d192b705" (UID: "9aa1caed-f687-4526-a851-59b4d192b705"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.012396 5133 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.082504 5133 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9aa1caed-f687-4526-a851-59b4d192b705-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.082541 5133 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.566058 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9aa1caed-f687-4526-a851-59b4d192b705","Type":"ContainerDied","Data":"a7fc3f8b7d21155945191d7e900e3bff6b85752089c4e0fda63c12885d5423cf"} Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.566103 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.566123 5133 scope.go:117] "RemoveContainer" containerID="bd3fe1e8202ee70b5ddcd1ed9c071a0cb93f6beae9a86622fc0017a4dd4a1716" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.574230 5133 generic.go:334] "Generic (PLEG): container finished" podID="2c9873f2-025e-499f-8a76-47c38495fd75" containerID="e453506f9e3a42b8e238b5fe885a82325983fce8b3d35ff909c123419ee46015" exitCode=0 Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.574290 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2c9873f2-025e-499f-8a76-47c38495fd75","Type":"ContainerDied","Data":"e453506f9e3a42b8e238b5fe885a82325983fce8b3d35ff909c123419ee46015"} Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.596922 5133 scope.go:117] "RemoveContainer" containerID="12333d78e0e6447e201e2ddaf5c5f5c05242a23fc2120dc8a1be00a6d2499df8" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.616445 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.624310 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.645260 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 14:07:47 crc kubenswrapper[5133]: E1121 14:07:47.645789 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9aa1caed-f687-4526-a851-59b4d192b705" containerName="setup-container" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.645812 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="9aa1caed-f687-4526-a851-59b4d192b705" containerName="setup-container" Nov 21 14:07:47 crc kubenswrapper[5133]: E1121 14:07:47.645831 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9aa1caed-f687-4526-a851-59b4d192b705" containerName="rabbitmq" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.645840 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="9aa1caed-f687-4526-a851-59b4d192b705" containerName="rabbitmq" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.646070 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="9aa1caed-f687-4526-a851-59b4d192b705" containerName="rabbitmq" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.647436 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.649527 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.649937 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-knfp8" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.650088 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.650169 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.650381 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.651311 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.651514 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.676293 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814009 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814317 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/83679b73-67d4-4733-b362-44060f589afd-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814370 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814390 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814421 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5tkh\" (UniqueName: \"kubernetes.io/projected/83679b73-67d4-4733-b362-44060f589afd-kube-api-access-j5tkh\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814437 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/83679b73-67d4-4733-b362-44060f589afd-server-conf\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814463 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83679b73-67d4-4733-b362-44060f589afd-config-data\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814481 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/83679b73-67d4-4733-b362-44060f589afd-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814504 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814523 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/83679b73-67d4-4733-b362-44060f589afd-pod-info\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.814541 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.915808 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.915924 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/83679b73-67d4-4733-b362-44060f589afd-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.915981 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916031 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " 
pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916072 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5tkh\" (UniqueName: \"kubernetes.io/projected/83679b73-67d4-4733-b362-44060f589afd-kube-api-access-j5tkh\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916096 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/83679b73-67d4-4733-b362-44060f589afd-server-conf\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916135 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83679b73-67d4-4733-b362-44060f589afd-config-data\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916159 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/83679b73-67d4-4733-b362-44060f589afd-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916192 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916217 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/83679b73-67d4-4733-b362-44060f589afd-pod-info\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916240 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916403 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916546 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.916904 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/83679b73-67d4-4733-b362-44060f589afd-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.917422 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/83679b73-67d4-4733-b362-44060f589afd-server-conf\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.918678 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.919553 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/83679b73-67d4-4733-b362-44060f589afd-config-data\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.921487 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.922606 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/83679b73-67d4-4733-b362-44060f589afd-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.927881 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/83679b73-67d4-4733-b362-44060f589afd-pod-info\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.930517 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/83679b73-67d4-4733-b362-44060f589afd-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.934883 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5tkh\" (UniqueName: \"kubernetes.io/projected/83679b73-67d4-4733-b362-44060f589afd-kube-api-access-j5tkh\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.956049 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"83679b73-67d4-4733-b362-44060f589afd\") " pod="openstack/rabbitmq-server-0" Nov 21 14:07:47 crc kubenswrapper[5133]: I1121 14:07:47.981533 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.015089 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.119696 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.119770 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-server-conf\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.119806 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-plugins\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.119891 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2c9873f2-025e-499f-8a76-47c38495fd75-pod-info\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.119945 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-plugins-conf\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.119976 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4scz\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-kube-api-access-f4scz\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.120055 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-confd\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.120138 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-tls\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.120221 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2c9873f2-025e-499f-8a76-47c38495fd75-erlang-cookie-secret\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.120347 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-erlang-cookie\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.121426 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.121989 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.122409 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-config-data\") pod \"2c9873f2-025e-499f-8a76-47c38495fd75\" (UID: \"2c9873f2-025e-499f-8a76-47c38495fd75\") " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.123657 5133 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.123769 5133 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.124726 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.126954 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.127198 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/2c9873f2-025e-499f-8a76-47c38495fd75-pod-info" (OuterVolumeSpecName: "pod-info") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.128687 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-kube-api-access-f4scz" (OuterVolumeSpecName: "kube-api-access-f4scz") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "kube-api-access-f4scz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.129521 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "persistence") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.136721 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c9873f2-025e-499f-8a76-47c38495fd75-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.155825 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-config-data" (OuterVolumeSpecName: "config-data") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.174230 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-server-conf" (OuterVolumeSpecName: "server-conf") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.225010 5133 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2c9873f2-025e-499f-8a76-47c38495fd75-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.225036 5133 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.225049 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.225075 5133 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.225085 5133 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2c9873f2-025e-499f-8a76-47c38495fd75-server-conf\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.225093 5133 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2c9873f2-025e-499f-8a76-47c38495fd75-pod-info\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.225101 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4scz\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-kube-api-access-f4scz\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.225110 5133 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.250200 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "2c9873f2-025e-499f-8a76-47c38495fd75" (UID: "2c9873f2-025e-499f-8a76-47c38495fd75"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.255828 5133 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.326946 5133 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2c9873f2-025e-499f-8a76-47c38495fd75-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.326985 5133 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.476184 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9aa1caed-f687-4526-a851-59b4d192b705" path="/var/lib/kubelet/pods/9aa1caed-f687-4526-a851-59b4d192b705/volumes" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.477440 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.588211 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"83679b73-67d4-4733-b362-44060f589afd","Type":"ContainerStarted","Data":"03525c0c6efd6959b718821f5c27836c8856e80745708db06c4bf93c5aed6a1f"} Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.591665 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"2c9873f2-025e-499f-8a76-47c38495fd75","Type":"ContainerDied","Data":"46043a57ce59dfbde180cf1937fded57281ada12bb3a56e8c9930a013963a006"} Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.591723 5133 scope.go:117] "RemoveContainer" containerID="e453506f9e3a42b8e238b5fe885a82325983fce8b3d35ff909c123419ee46015" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.591740 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.616693 5133 scope.go:117] "RemoveContainer" containerID="7743d3ae5f3d1730d9ae8514c383685ab59cb65808a01679f0e73dc16f30e218" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.618873 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.625574 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.651905 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 14:07:48 crc kubenswrapper[5133]: E1121 14:07:48.652299 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c9873f2-025e-499f-8a76-47c38495fd75" containerName="setup-container" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.652317 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c9873f2-025e-499f-8a76-47c38495fd75" containerName="setup-container" Nov 21 14:07:48 crc kubenswrapper[5133]: E1121 14:07:48.652334 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c9873f2-025e-499f-8a76-47c38495fd75" containerName="rabbitmq" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.652341 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c9873f2-025e-499f-8a76-47c38495fd75" containerName="rabbitmq" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.652536 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c9873f2-025e-499f-8a76-47c38495fd75" containerName="rabbitmq" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.653855 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.658805 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.659148 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.659302 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-jqhmq" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.659499 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.659634 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.659779 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.660672 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.672787 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.736970 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/45d066aa-3bb1-4fdc-8c88-c384dd156e93-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737053 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737107 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737126 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/45d066aa-3bb1-4fdc-8c88-c384dd156e93-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737418 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/45d066aa-3bb1-4fdc-8c88-c384dd156e93-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737492 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45d066aa-3bb1-4fdc-8c88-c384dd156e93-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737541 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737571 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737599 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/45d066aa-3bb1-4fdc-8c88-c384dd156e93-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737641 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86jv9\" (UniqueName: \"kubernetes.io/projected/45d066aa-3bb1-4fdc-8c88-c384dd156e93-kube-api-access-86jv9\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.737694 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.839341 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.839751 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/45d066aa-3bb1-4fdc-8c88-c384dd156e93-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.839798 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.839848 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.839873 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/45d066aa-3bb1-4fdc-8c88-c384dd156e93-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.839909 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/45d066aa-3bb1-4fdc-8c88-c384dd156e93-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.839963 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45d066aa-3bb1-4fdc-8c88-c384dd156e93-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.839905 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.840025 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.840056 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.840083 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/45d066aa-3bb1-4fdc-8c88-c384dd156e93-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.840126 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86jv9\" (UniqueName: \"kubernetes.io/projected/45d066aa-3bb1-4fdc-8c88-c384dd156e93-kube-api-access-86jv9\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.840313 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.840895 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/45d066aa-3bb1-4fdc-8c88-c384dd156e93-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.841276 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/45d066aa-3bb1-4fdc-8c88-c384dd156e93-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.841333 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/45d066aa-3bb1-4fdc-8c88-c384dd156e93-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.843957 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.845266 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/45d066aa-3bb1-4fdc-8c88-c384dd156e93-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.846799 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.847740 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/45d066aa-3bb1-4fdc-8c88-c384dd156e93-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.848234 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/45d066aa-3bb1-4fdc-8c88-c384dd156e93-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.860315 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-bn5mn"] Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.861769 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.865159 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.870119 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86jv9\" (UniqueName: \"kubernetes.io/projected/45d066aa-3bb1-4fdc-8c88-c384dd156e93-kube-api-access-86jv9\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.874660 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-bn5mn"] Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.875410 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"45d066aa-3bb1-4fdc-8c88-c384dd156e93\") " pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.941063 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc28w\" (UniqueName: \"kubernetes.io/projected/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-kube-api-access-gc28w\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.941154 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-openstack-edpm-ipam\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.941194 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-dns-svc\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.941217 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-sb\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.941240 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-nb\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:48 crc kubenswrapper[5133]: I1121 14:07:48.941365 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-config\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " 
pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.019603 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.042437 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-openstack-edpm-ipam\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.042510 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-dns-svc\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.042545 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-sb\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.042571 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-nb\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.042633 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-config\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.042661 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc28w\" (UniqueName: \"kubernetes.io/projected/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-kube-api-access-gc28w\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.043465 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-dns-svc\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.043489 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-config\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.043586 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-sb\") pod 
\"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.043733 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-nb\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.044080 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-openstack-edpm-ipam\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.059132 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc28w\" (UniqueName: \"kubernetes.io/projected/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-kube-api-access-gc28w\") pod \"dnsmasq-dns-578b8d767c-bn5mn\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.229318 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.462859 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 21 14:07:49 crc kubenswrapper[5133]: W1121 14:07:49.472641 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45d066aa_3bb1_4fdc_8c88_c384dd156e93.slice/crio-aa395b70f3fdac8d60a715f57b5069608c184aaa6784dce63f50d964f1bf8627 WatchSource:0}: Error finding container aa395b70f3fdac8d60a715f57b5069608c184aaa6784dce63f50d964f1bf8627: Status 404 returned error can't find the container with id aa395b70f3fdac8d60a715f57b5069608c184aaa6784dce63f50d964f1bf8627 Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.604030 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"45d066aa-3bb1-4fdc-8c88-c384dd156e93","Type":"ContainerStarted","Data":"aa395b70f3fdac8d60a715f57b5069608c184aaa6784dce63f50d964f1bf8627"} Nov 21 14:07:49 crc kubenswrapper[5133]: I1121 14:07:49.726042 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-bn5mn"] Nov 21 14:07:49 crc kubenswrapper[5133]: W1121 14:07:49.913168 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod559830d9_5f39_4555_ba6f_0ee5c6c6a12a.slice/crio-ba412428ac31ff1905e1d25b13c2747a4e8576e94970aff276cbfe57e2219d7e WatchSource:0}: Error finding container ba412428ac31ff1905e1d25b13c2747a4e8576e94970aff276cbfe57e2219d7e: Status 404 returned error can't find the container with id ba412428ac31ff1905e1d25b13c2747a4e8576e94970aff276cbfe57e2219d7e Nov 21 14:07:50 crc kubenswrapper[5133]: I1121 14:07:50.467633 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c9873f2-025e-499f-8a76-47c38495fd75" path="/var/lib/kubelet/pods/2c9873f2-025e-499f-8a76-47c38495fd75/volumes" Nov 21 14:07:50 crc kubenswrapper[5133]: I1121 14:07:50.618691 5133 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/rabbitmq-server-0" event={"ID":"83679b73-67d4-4733-b362-44060f589afd","Type":"ContainerStarted","Data":"bc7080a86c95a6996d4938ec1b9cb99705d3e5315d0977cc88547cdb8ca59e82"} Nov 21 14:07:50 crc kubenswrapper[5133]: I1121 14:07:50.622238 5133 generic.go:334] "Generic (PLEG): container finished" podID="559830d9-5f39-4555-ba6f-0ee5c6c6a12a" containerID="df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988" exitCode=0 Nov 21 14:07:50 crc kubenswrapper[5133]: I1121 14:07:50.622287 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" event={"ID":"559830d9-5f39-4555-ba6f-0ee5c6c6a12a","Type":"ContainerDied","Data":"df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988"} Nov 21 14:07:50 crc kubenswrapper[5133]: I1121 14:07:50.622317 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" event={"ID":"559830d9-5f39-4555-ba6f-0ee5c6c6a12a","Type":"ContainerStarted","Data":"ba412428ac31ff1905e1d25b13c2747a4e8576e94970aff276cbfe57e2219d7e"} Nov 21 14:07:51 crc kubenswrapper[5133]: I1121 14:07:51.635260 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" event={"ID":"559830d9-5f39-4555-ba6f-0ee5c6c6a12a","Type":"ContainerStarted","Data":"06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a"} Nov 21 14:07:51 crc kubenswrapper[5133]: I1121 14:07:51.636429 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:52 crc kubenswrapper[5133]: I1121 14:07:52.547887 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" podStartSLOduration=4.547859979 podStartE2EDuration="4.547859979s" podCreationTimestamp="2025-11-21 14:07:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:07:51.659358482 +0000 UTC m=+1531.457190760" watchObservedRunningTime="2025-11-21 14:07:52.547859979 +0000 UTC m=+1532.345692257" Nov 21 14:07:52 crc kubenswrapper[5133]: I1121 14:07:52.650221 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"45d066aa-3bb1-4fdc-8c88-c384dd156e93","Type":"ContainerStarted","Data":"5e668bd92219d450e80edea4e46348a441be0c959200a45f10060d000ca12b29"} Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.231355 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.287822 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-qk78s"] Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.288135 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" podUID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" containerName="dnsmasq-dns" containerID="cri-o://9d8af020fcabe90d00acb5ecde08a3014609bb733173a3643bb6fa952f36c8f8" gracePeriod=10 Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.573048 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-dcjpl"] Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.575468 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.607982 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-dcjpl"] Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.669168 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-dns-svc\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.669238 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.669294 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-openstack-edpm-ipam\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.669360 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-config\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.669397 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.669418 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59pqc\" (UniqueName: \"kubernetes.io/projected/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-kube-api-access-59pqc\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.765575 5133 generic.go:334] "Generic (PLEG): container finished" podID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" containerID="9d8af020fcabe90d00acb5ecde08a3014609bb733173a3643bb6fa952f36c8f8" exitCode=0 Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.765631 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" event={"ID":"33fa8cc5-ffef-4e3b-8816-e643c09ff259","Type":"ContainerDied","Data":"9d8af020fcabe90d00acb5ecde08a3014609bb733173a3643bb6fa952f36c8f8"} Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.771260 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-openstack-edpm-ipam\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: 
\"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.771359 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-config\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.771396 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.771421 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59pqc\" (UniqueName: \"kubernetes.io/projected/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-kube-api-access-59pqc\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.771504 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-dns-svc\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.771543 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.772582 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-sb\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.775568 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-nb\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.776103 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-openstack-edpm-ipam\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.776584 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-config\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc 
kubenswrapper[5133]: I1121 14:07:59.777146 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-dns-svc\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.809161 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59pqc\" (UniqueName: \"kubernetes.io/projected/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-kube-api-access-59pqc\") pod \"dnsmasq-dns-fbc59fbb7-dcjpl\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:07:59 crc kubenswrapper[5133]: I1121 14:07:59.902579 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.004846 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.178329 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-sb\") pod \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.178407 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-nb\") pod \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.180104 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-dns-svc\") pod \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.180143 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njwbp\" (UniqueName: \"kubernetes.io/projected/33fa8cc5-ffef-4e3b-8816-e643c09ff259-kube-api-access-njwbp\") pod \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.180181 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-config\") pod \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\" (UID: \"33fa8cc5-ffef-4e3b-8816-e643c09ff259\") " Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.184808 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33fa8cc5-ffef-4e3b-8816-e643c09ff259-kube-api-access-njwbp" (OuterVolumeSpecName: "kube-api-access-njwbp") pod "33fa8cc5-ffef-4e3b-8816-e643c09ff259" (UID: "33fa8cc5-ffef-4e3b-8816-e643c09ff259"). InnerVolumeSpecName "kube-api-access-njwbp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.223302 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-config" (OuterVolumeSpecName: "config") pod "33fa8cc5-ffef-4e3b-8816-e643c09ff259" (UID: "33fa8cc5-ffef-4e3b-8816-e643c09ff259"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.224891 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "33fa8cc5-ffef-4e3b-8816-e643c09ff259" (UID: "33fa8cc5-ffef-4e3b-8816-e643c09ff259"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.233529 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "33fa8cc5-ffef-4e3b-8816-e643c09ff259" (UID: "33fa8cc5-ffef-4e3b-8816-e643c09ff259"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.243961 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "33fa8cc5-ffef-4e3b-8816-e643c09ff259" (UID: "33fa8cc5-ffef-4e3b-8816-e643c09ff259"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.284012 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.284116 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.284174 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njwbp\" (UniqueName: \"kubernetes.io/projected/33fa8cc5-ffef-4e3b-8816-e643c09ff259-kube-api-access-njwbp\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.284190 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.284202 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/33fa8cc5-ffef-4e3b-8816-e643c09ff259-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.390052 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-dcjpl"] Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.777251 5133 generic.go:334] "Generic (PLEG): container finished" podID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" containerID="5cb12f57c7d673c89569bac68aa0f81e6b315eabf894678b0750d4b0bdc3233e" exitCode=0 Nov 21 14:08:00 crc 
kubenswrapper[5133]: I1121 14:08:00.777458 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" event={"ID":"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745","Type":"ContainerDied","Data":"5cb12f57c7d673c89569bac68aa0f81e6b315eabf894678b0750d4b0bdc3233e"} Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.777705 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" event={"ID":"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745","Type":"ContainerStarted","Data":"e2df6725baf5124bf138a6961f071e82e4fea6ee8aeb6b0c0c82dfa1a0b5e7af"} Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.781469 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" event={"ID":"33fa8cc5-ffef-4e3b-8816-e643c09ff259","Type":"ContainerDied","Data":"9fecb190a31ef82c0080ee668495979969d6f7b60a70209a50aa20d57946fe23"} Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.781514 5133 scope.go:117] "RemoveContainer" containerID="9d8af020fcabe90d00acb5ecde08a3014609bb733173a3643bb6fa952f36c8f8" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.781518 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.929406 5133 scope.go:117] "RemoveContainer" containerID="4a0d33d8e7e2e22cec3c93bff1b15a28cb06bfde2d35e28a475554e37986c5ee" Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.953691 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-qk78s"] Nov 21 14:08:00 crc kubenswrapper[5133]: I1121 14:08:00.961104 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68d4b6d797-qk78s"] Nov 21 14:08:01 crc kubenswrapper[5133]: I1121 14:08:01.809526 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" event={"ID":"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745","Type":"ContainerStarted","Data":"54c3b38ae97cca8f97c9c0637bb132924af3ffc49699b75bf620afbf5c0b5b21"} Nov 21 14:08:01 crc kubenswrapper[5133]: I1121 14:08:01.810020 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:08:01 crc kubenswrapper[5133]: I1121 14:08:01.852893 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" podStartSLOduration=2.852872035 podStartE2EDuration="2.852872035s" podCreationTimestamp="2025-11-21 14:07:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:08:01.841796139 +0000 UTC m=+1541.639628427" watchObservedRunningTime="2025-11-21 14:08:01.852872035 +0000 UTC m=+1541.650704283" Nov 21 14:08:02 crc kubenswrapper[5133]: I1121 14:08:02.471337 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" path="/var/lib/kubelet/pods/33fa8cc5-ffef-4e3b-8816-e643c09ff259/volumes" Nov 21 14:08:04 crc kubenswrapper[5133]: I1121 14:08:04.646233 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-68d4b6d797-qk78s" podUID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.180:5353: i/o timeout" Nov 21 14:08:09 crc kubenswrapper[5133]: I1121 14:08:09.904238 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.039362 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-bn5mn"] Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.041250 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" podUID="559830d9-5f39-4555-ba6f-0ee5c6c6a12a" containerName="dnsmasq-dns" containerID="cri-o://06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a" gracePeriod=10 Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.505378 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.611308 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-nb\") pod \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.611748 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-dns-svc\") pod \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.611807 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-openstack-edpm-ipam\") pod \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.611836 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gc28w\" (UniqueName: \"kubernetes.io/projected/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-kube-api-access-gc28w\") pod \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.611893 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-sb\") pod \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.611928 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-config\") pod \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\" (UID: \"559830d9-5f39-4555-ba6f-0ee5c6c6a12a\") " Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.624234 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-kube-api-access-gc28w" (OuterVolumeSpecName: "kube-api-access-gc28w") pod "559830d9-5f39-4555-ba6f-0ee5c6c6a12a" (UID: "559830d9-5f39-4555-ba6f-0ee5c6c6a12a"). InnerVolumeSpecName "kube-api-access-gc28w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.666052 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "559830d9-5f39-4555-ba6f-0ee5c6c6a12a" (UID: "559830d9-5f39-4555-ba6f-0ee5c6c6a12a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.667126 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-config" (OuterVolumeSpecName: "config") pod "559830d9-5f39-4555-ba6f-0ee5c6c6a12a" (UID: "559830d9-5f39-4555-ba6f-0ee5c6c6a12a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.667270 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "559830d9-5f39-4555-ba6f-0ee5c6c6a12a" (UID: "559830d9-5f39-4555-ba6f-0ee5c6c6a12a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.673055 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "559830d9-5f39-4555-ba6f-0ee5c6c6a12a" (UID: "559830d9-5f39-4555-ba6f-0ee5c6c6a12a"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.695656 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "559830d9-5f39-4555-ba6f-0ee5c6c6a12a" (UID: "559830d9-5f39-4555-ba6f-0ee5c6c6a12a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.716435 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.716470 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.716483 5133 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.716492 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gc28w\" (UniqueName: \"kubernetes.io/projected/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-kube-api-access-gc28w\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.716502 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.716511 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/559830d9-5f39-4555-ba6f-0ee5c6c6a12a-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.914284 5133 generic.go:334] "Generic (PLEG): container finished" podID="559830d9-5f39-4555-ba6f-0ee5c6c6a12a" containerID="06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a" exitCode=0 Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.914363 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" event={"ID":"559830d9-5f39-4555-ba6f-0ee5c6c6a12a","Type":"ContainerDied","Data":"06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a"} Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.914416 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" event={"ID":"559830d9-5f39-4555-ba6f-0ee5c6c6a12a","Type":"ContainerDied","Data":"ba412428ac31ff1905e1d25b13c2747a4e8576e94970aff276cbfe57e2219d7e"} Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.914429 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-578b8d767c-bn5mn" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.914453 5133 scope.go:117] "RemoveContainer" containerID="06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.948586 5133 scope.go:117] "RemoveContainer" containerID="df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988" Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.969396 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-bn5mn"] Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.976603 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-578b8d767c-bn5mn"] Nov 21 14:08:10 crc kubenswrapper[5133]: I1121 14:08:10.999402 5133 scope.go:117] "RemoveContainer" containerID="06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a" Nov 21 14:08:11 crc kubenswrapper[5133]: E1121 14:08:10.999960 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a\": container with ID starting with 06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a not found: ID does not exist" containerID="06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a" Nov 21 14:08:11 crc kubenswrapper[5133]: I1121 14:08:11.000102 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a"} err="failed to get container status \"06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a\": rpc error: code = NotFound desc = could not find container \"06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a\": container with ID starting with 06d19524ac3e1dd23dd5276cc6f55e1ec4b0509a0c73a97aaeb87f00cc61a55a not found: ID does not exist" Nov 21 14:08:11 crc kubenswrapper[5133]: I1121 14:08:11.000149 5133 scope.go:117] "RemoveContainer" containerID="df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988" Nov 21 14:08:11 crc kubenswrapper[5133]: E1121 14:08:11.000582 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988\": container with ID starting with df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988 not found: ID does not exist" containerID="df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988" Nov 21 14:08:11 crc kubenswrapper[5133]: I1121 14:08:11.000636 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988"} err="failed to get container status \"df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988\": rpc error: code = NotFound desc = could not find container \"df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988\": container with ID starting with df6c3e2b1de9f65fac0acbcb7db14947616b66f0f1b6c6d70e9c8c5460199988 not found: ID does not exist" Nov 21 14:08:12 crc kubenswrapper[5133]: I1121 14:08:12.478107 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="559830d9-5f39-4555-ba6f-0ee5c6c6a12a" path="/var/lib/kubelet/pods/559830d9-5f39-4555-ba6f-0ee5c6c6a12a/volumes" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.704531 5133 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nsgcs"] Nov 21 14:08:15 crc kubenswrapper[5133]: E1121 14:08:15.705569 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" containerName="init" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.705585 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" containerName="init" Nov 21 14:08:15 crc kubenswrapper[5133]: E1121 14:08:15.705612 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="559830d9-5f39-4555-ba6f-0ee5c6c6a12a" containerName="dnsmasq-dns" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.705620 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="559830d9-5f39-4555-ba6f-0ee5c6c6a12a" containerName="dnsmasq-dns" Nov 21 14:08:15 crc kubenswrapper[5133]: E1121 14:08:15.705635 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" containerName="dnsmasq-dns" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.705642 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" containerName="dnsmasq-dns" Nov 21 14:08:15 crc kubenswrapper[5133]: E1121 14:08:15.705662 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="559830d9-5f39-4555-ba6f-0ee5c6c6a12a" containerName="init" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.705668 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="559830d9-5f39-4555-ba6f-0ee5c6c6a12a" containerName="init" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.705861 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="559830d9-5f39-4555-ba6f-0ee5c6c6a12a" containerName="dnsmasq-dns" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.705885 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="33fa8cc5-ffef-4e3b-8816-e643c09ff259" containerName="dnsmasq-dns" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.707410 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.717491 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nsgcs"] Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.815188 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-utilities\") pod \"certified-operators-nsgcs\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.815282 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdzq5\" (UniqueName: \"kubernetes.io/projected/502ec960-5d5e-4019-8c44-72b51fd5867d-kube-api-access-rdzq5\") pod \"certified-operators-nsgcs\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.815383 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-catalog-content\") pod \"certified-operators-nsgcs\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.917909 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-utilities\") pod \"certified-operators-nsgcs\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.918024 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdzq5\" (UniqueName: \"kubernetes.io/projected/502ec960-5d5e-4019-8c44-72b51fd5867d-kube-api-access-rdzq5\") pod \"certified-operators-nsgcs\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.918115 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-catalog-content\") pod \"certified-operators-nsgcs\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.918724 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-utilities\") pod \"certified-operators-nsgcs\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.918814 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-catalog-content\") pod \"certified-operators-nsgcs\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:15 crc kubenswrapper[5133]: I1121 14:08:15.945149 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rdzq5\" (UniqueName: \"kubernetes.io/projected/502ec960-5d5e-4019-8c44-72b51fd5867d-kube-api-access-rdzq5\") pod \"certified-operators-nsgcs\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:16 crc kubenswrapper[5133]: I1121 14:08:16.039185 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:16 crc kubenswrapper[5133]: I1121 14:08:16.566102 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nsgcs"] Nov 21 14:08:16 crc kubenswrapper[5133]: I1121 14:08:16.988757 5133 generic.go:334] "Generic (PLEG): container finished" podID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerID="f6b06a39cd16f80dd2fdb6f5e565af713919306ea41b24a7d0fc13962ef288dd" exitCode=0 Nov 21 14:08:16 crc kubenswrapper[5133]: I1121 14:08:16.988844 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nsgcs" event={"ID":"502ec960-5d5e-4019-8c44-72b51fd5867d","Type":"ContainerDied","Data":"f6b06a39cd16f80dd2fdb6f5e565af713919306ea41b24a7d0fc13962ef288dd"} Nov 21 14:08:16 crc kubenswrapper[5133]: I1121 14:08:16.988911 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nsgcs" event={"ID":"502ec960-5d5e-4019-8c44-72b51fd5867d","Type":"ContainerStarted","Data":"b6bb97fa8231eb81c42d4a0e2eec4775ab0b1e7bc132b86c2b5acfae8a2327bc"} Nov 21 14:08:19 crc kubenswrapper[5133]: I1121 14:08:19.019515 5133 generic.go:334] "Generic (PLEG): container finished" podID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerID="6d3c0f561fce8ac684ced3ff456bb88b545cd74c0eca125228841d095eb69558" exitCode=0 Nov 21 14:08:19 crc kubenswrapper[5133]: I1121 14:08:19.020247 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nsgcs" event={"ID":"502ec960-5d5e-4019-8c44-72b51fd5867d","Type":"ContainerDied","Data":"6d3c0f561fce8ac684ced3ff456bb88b545cd74c0eca125228841d095eb69558"} Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.000548 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw"] Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.002815 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.009533 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.009697 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.009767 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.009842 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.021609 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw"] Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.045864 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nsgcs" event={"ID":"502ec960-5d5e-4019-8c44-72b51fd5867d","Type":"ContainerStarted","Data":"b132cfb2b3ec65832fc3dbd555b1a8d48fce6d390de7b91e0506a1679e074c81"} Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.068224 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nsgcs" podStartSLOduration=2.637198809 podStartE2EDuration="5.068208558s" podCreationTimestamp="2025-11-21 14:08:15 +0000 UTC" firstStartedPulling="2025-11-21 14:08:16.991120146 +0000 UTC m=+1556.788952434" lastFinishedPulling="2025-11-21 14:08:19.422129925 +0000 UTC m=+1559.219962183" observedRunningTime="2025-11-21 14:08:20.06340136 +0000 UTC m=+1559.861233608" watchObservedRunningTime="2025-11-21 14:08:20.068208558 +0000 UTC m=+1559.866040806" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.105559 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.105608 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.105693 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmxhm\" (UniqueName: \"kubernetes.io/projected/a541160c-9971-4969-9296-23972aaf6bbf-kube-api-access-mmxhm\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.105899 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.207143 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.207226 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.207247 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.207320 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmxhm\" (UniqueName: \"kubernetes.io/projected/a541160c-9971-4969-9296-23972aaf6bbf-kube-api-access-mmxhm\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.213750 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.214168 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.218464 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.224143 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmxhm\" (UniqueName: 
\"kubernetes.io/projected/a541160c-9971-4969-9296-23972aaf6bbf-kube-api-access-mmxhm\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.327495 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:08:20 crc kubenswrapper[5133]: I1121 14:08:20.932803 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw"] Nov 21 14:08:20 crc kubenswrapper[5133]: W1121 14:08:20.941057 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda541160c_9971_4969_9296_23972aaf6bbf.slice/crio-f6098bda21dbffa94ce7d5462d2f4d48c098e88e0a6dd2e70232e60291395b9b WatchSource:0}: Error finding container f6098bda21dbffa94ce7d5462d2f4d48c098e88e0a6dd2e70232e60291395b9b: Status 404 returned error can't find the container with id f6098bda21dbffa94ce7d5462d2f4d48c098e88e0a6dd2e70232e60291395b9b Nov 21 14:08:21 crc kubenswrapper[5133]: I1121 14:08:21.057975 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" event={"ID":"a541160c-9971-4969-9296-23972aaf6bbf","Type":"ContainerStarted","Data":"f6098bda21dbffa94ce7d5462d2f4d48c098e88e0a6dd2e70232e60291395b9b"} Nov 21 14:08:24 crc kubenswrapper[5133]: I1121 14:08:24.089857 5133 generic.go:334] "Generic (PLEG): container finished" podID="83679b73-67d4-4733-b362-44060f589afd" containerID="bc7080a86c95a6996d4938ec1b9cb99705d3e5315d0977cc88547cdb8ca59e82" exitCode=0 Nov 21 14:08:24 crc kubenswrapper[5133]: I1121 14:08:24.090150 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"83679b73-67d4-4733-b362-44060f589afd","Type":"ContainerDied","Data":"bc7080a86c95a6996d4938ec1b9cb99705d3e5315d0977cc88547cdb8ca59e82"} Nov 21 14:08:25 crc kubenswrapper[5133]: I1121 14:08:25.105618 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"83679b73-67d4-4733-b362-44060f589afd","Type":"ContainerStarted","Data":"1c371157d411aad975de9671108f8422d5300afda568b700eeed40ec57274416"} Nov 21 14:08:25 crc kubenswrapper[5133]: I1121 14:08:25.106415 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 21 14:08:25 crc kubenswrapper[5133]: I1121 14:08:25.111713 5133 generic.go:334] "Generic (PLEG): container finished" podID="45d066aa-3bb1-4fdc-8c88-c384dd156e93" containerID="5e668bd92219d450e80edea4e46348a441be0c959200a45f10060d000ca12b29" exitCode=0 Nov 21 14:08:25 crc kubenswrapper[5133]: I1121 14:08:25.111767 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"45d066aa-3bb1-4fdc-8c88-c384dd156e93","Type":"ContainerDied","Data":"5e668bd92219d450e80edea4e46348a441be0c959200a45f10060d000ca12b29"} Nov 21 14:08:25 crc kubenswrapper[5133]: I1121 14:08:25.141254 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.140978174 podStartE2EDuration="38.140978174s" podCreationTimestamp="2025-11-21 14:07:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-21 14:08:25.132195889 +0000 UTC m=+1564.930028147" watchObservedRunningTime="2025-11-21 14:08:25.140978174 +0000 UTC m=+1564.938810432" Nov 21 14:08:26 crc kubenswrapper[5133]: I1121 14:08:26.039530 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:26 crc kubenswrapper[5133]: I1121 14:08:26.039597 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:26 crc kubenswrapper[5133]: I1121 14:08:26.106486 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:26 crc kubenswrapper[5133]: I1121 14:08:26.179566 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:26 crc kubenswrapper[5133]: I1121 14:08:26.349119 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nsgcs"] Nov 21 14:08:28 crc kubenswrapper[5133]: I1121 14:08:28.150199 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nsgcs" podUID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerName="registry-server" containerID="cri-o://b132cfb2b3ec65832fc3dbd555b1a8d48fce6d390de7b91e0506a1679e074c81" gracePeriod=2 Nov 21 14:08:29 crc kubenswrapper[5133]: I1121 14:08:29.181962 5133 generic.go:334] "Generic (PLEG): container finished" podID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerID="b132cfb2b3ec65832fc3dbd555b1a8d48fce6d390de7b91e0506a1679e074c81" exitCode=0 Nov 21 14:08:29 crc kubenswrapper[5133]: I1121 14:08:29.182437 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nsgcs" event={"ID":"502ec960-5d5e-4019-8c44-72b51fd5867d","Type":"ContainerDied","Data":"b132cfb2b3ec65832fc3dbd555b1a8d48fce6d390de7b91e0506a1679e074c81"} Nov 21 14:08:34 crc kubenswrapper[5133]: E1121 14:08:34.712037 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest" Nov 21 14:08:34 crc kubenswrapper[5133]: E1121 14:08:34.713321 5133 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 21 14:08:34 crc kubenswrapper[5133]: container &Container{Name:repo-setup-edpm-deployment-openstack-edpm-ipam,Image:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,Command:[],Args:[ansible-runner run /runner -p playbook.yaml -i repo-setup-edpm-deployment-openstack-edpm-ipam],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:ANSIBLE_VERBOSITY,Value:2,ValueFrom:nil,},EnvVar{Name:RUNNER_PLAYBOOK,Value: Nov 21 14:08:34 crc kubenswrapper[5133]: - hosts: all Nov 21 14:08:34 crc kubenswrapper[5133]: strategy: linear Nov 21 14:08:34 crc kubenswrapper[5133]: tasks: Nov 21 14:08:34 crc kubenswrapper[5133]: - name: Enable podified-repos Nov 21 14:08:34 crc kubenswrapper[5133]: become: true Nov 21 14:08:34 crc kubenswrapper[5133]: ansible.builtin.shell: | Nov 21 14:08:34 crc kubenswrapper[5133]: set -euxo pipefail Nov 21 14:08:34 crc kubenswrapper[5133]: pushd /var/tmp Nov 21 14:08:34 crc kubenswrapper[5133]: curl -sL https://github.com/openstack-k8s-operators/repo-setup/archive/refs/heads/main.tar.gz | tar -xz Nov 21 14:08:34 crc kubenswrapper[5133]: pushd repo-setup-main Nov 21 
14:08:34 crc kubenswrapper[5133]: python3 -m venv ./venv Nov 21 14:08:34 crc kubenswrapper[5133]: PBR_VERSION=0.0.0 ./venv/bin/pip install ./ Nov 21 14:08:34 crc kubenswrapper[5133]: ./venv/bin/repo-setup current-podified -b antelope Nov 21 14:08:34 crc kubenswrapper[5133]: popd Nov 21 14:08:34 crc kubenswrapper[5133]: rm -rf repo-setup-main Nov 21 14:08:34 crc kubenswrapper[5133]: Nov 21 14:08:34 crc kubenswrapper[5133]: Nov 21 14:08:34 crc kubenswrapper[5133]: ,ValueFrom:nil,},EnvVar{Name:RUNNER_EXTRA_VARS,Value: Nov 21 14:08:34 crc kubenswrapper[5133]: edpm_override_hosts: openstack-edpm-ipam Nov 21 14:08:34 crc kubenswrapper[5133]: edpm_service_type: repo-setup Nov 21 14:08:34 crc kubenswrapper[5133]: Nov 21 14:08:34 crc kubenswrapper[5133]: Nov 21 14:08:34 crc kubenswrapper[5133]: ,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:repo-setup-combined-ca-bundle,ReadOnly:false,MountPath:/var/lib/openstack/cacerts/repo-setup,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/runner/env/ssh_key,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:inventory,ReadOnly:false,MountPath:/runner/inventory/hosts,SubPath:inventory,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mmxhm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:openstack-aee-default-env,},Optional:*true,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw_openstack(a541160c-9971-4969-9296-23972aaf6bbf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled Nov 21 14:08:34 crc kubenswrapper[5133]: > logger="UnhandledError" Nov 21 14:08:34 crc kubenswrapper[5133]: E1121 14:08:34.715398 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" podUID="a541160c-9971-4969-9296-23972aaf6bbf" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.045609 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.163542 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdzq5\" (UniqueName: \"kubernetes.io/projected/502ec960-5d5e-4019-8c44-72b51fd5867d-kube-api-access-rdzq5\") pod \"502ec960-5d5e-4019-8c44-72b51fd5867d\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.163616 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-catalog-content\") pod \"502ec960-5d5e-4019-8c44-72b51fd5867d\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.163817 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-utilities\") pod \"502ec960-5d5e-4019-8c44-72b51fd5867d\" (UID: \"502ec960-5d5e-4019-8c44-72b51fd5867d\") " Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.165074 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-utilities" (OuterVolumeSpecName: "utilities") pod "502ec960-5d5e-4019-8c44-72b51fd5867d" (UID: "502ec960-5d5e-4019-8c44-72b51fd5867d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.170963 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/502ec960-5d5e-4019-8c44-72b51fd5867d-kube-api-access-rdzq5" (OuterVolumeSpecName: "kube-api-access-rdzq5") pod "502ec960-5d5e-4019-8c44-72b51fd5867d" (UID: "502ec960-5d5e-4019-8c44-72b51fd5867d"). InnerVolumeSpecName "kube-api-access-rdzq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.235307 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "502ec960-5d5e-4019-8c44-72b51fd5867d" (UID: "502ec960-5d5e-4019-8c44-72b51fd5867d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.264958 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nsgcs" event={"ID":"502ec960-5d5e-4019-8c44-72b51fd5867d","Type":"ContainerDied","Data":"b6bb97fa8231eb81c42d4a0e2eec4775ab0b1e7bc132b86c2b5acfae8a2327bc"} Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.265042 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nsgcs" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.265065 5133 scope.go:117] "RemoveContainer" containerID="b132cfb2b3ec65832fc3dbd555b1a8d48fce6d390de7b91e0506a1679e074c81" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.267645 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdzq5\" (UniqueName: \"kubernetes.io/projected/502ec960-5d5e-4019-8c44-72b51fd5867d-kube-api-access-rdzq5\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.267752 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.268057 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/502ec960-5d5e-4019-8c44-72b51fd5867d-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.269891 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"45d066aa-3bb1-4fdc-8c88-c384dd156e93","Type":"ContainerStarted","Data":"a86f896c59f50d526237301f519e921698d405428b462e5214e5c75b7286a5ff"} Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.270274 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:08:35 crc kubenswrapper[5133]: E1121 14:08:35.271341 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest\\\"\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" podUID="a541160c-9971-4969-9296-23972aaf6bbf" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.310287 5133 scope.go:117] "RemoveContainer" containerID="6d3c0f561fce8ac684ced3ff456bb88b545cd74c0eca125228841d095eb69558" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.325211 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=47.325193458 podStartE2EDuration="47.325193458s" podCreationTimestamp="2025-11-21 14:07:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:08:35.324065938 +0000 UTC m=+1575.121898206" watchObservedRunningTime="2025-11-21 14:08:35.325193458 +0000 UTC m=+1575.123025706" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.356480 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nsgcs"] Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.370345 5133 scope.go:117] "RemoveContainer" containerID="f6b06a39cd16f80dd2fdb6f5e565af713919306ea41b24a7d0fc13962ef288dd" Nov 21 14:08:35 crc kubenswrapper[5133]: I1121 14:08:35.370481 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nsgcs"] Nov 21 14:08:36 crc kubenswrapper[5133]: I1121 14:08:36.470714 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="502ec960-5d5e-4019-8c44-72b51fd5867d" path="/var/lib/kubelet/pods/502ec960-5d5e-4019-8c44-72b51fd5867d/volumes" Nov 21 14:08:37 crc 
kubenswrapper[5133]: I1121 14:08:37.986290 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 21 14:08:47 crc kubenswrapper[5133]: I1121 14:08:47.898043 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:08:48 crc kubenswrapper[5133]: I1121 14:08:48.427189 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" event={"ID":"a541160c-9971-4969-9296-23972aaf6bbf","Type":"ContainerStarted","Data":"859852c198f5f3f6fba179cf2d1f27465aaa13222a592f190933741620fbc7f6"} Nov 21 14:08:48 crc kubenswrapper[5133]: I1121 14:08:48.457475 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" podStartSLOduration=2.50776149 podStartE2EDuration="29.457446729s" podCreationTimestamp="2025-11-21 14:08:19 +0000 UTC" firstStartedPulling="2025-11-21 14:08:20.944333654 +0000 UTC m=+1560.742165902" lastFinishedPulling="2025-11-21 14:08:47.894018883 +0000 UTC m=+1587.691851141" observedRunningTime="2025-11-21 14:08:48.449802185 +0000 UTC m=+1588.247634463" watchObservedRunningTime="2025-11-21 14:08:48.457446729 +0000 UTC m=+1588.255279017" Nov 21 14:08:49 crc kubenswrapper[5133]: I1121 14:08:49.022249 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 21 14:08:53 crc kubenswrapper[5133]: I1121 14:08:53.310328 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:08:53 crc kubenswrapper[5133]: I1121 14:08:53.311620 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:09:00 crc kubenswrapper[5133]: I1121 14:09:00.565262 5133 generic.go:334] "Generic (PLEG): container finished" podID="a541160c-9971-4969-9296-23972aaf6bbf" containerID="859852c198f5f3f6fba179cf2d1f27465aaa13222a592f190933741620fbc7f6" exitCode=0 Nov 21 14:09:00 crc kubenswrapper[5133]: I1121 14:09:00.565284 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" event={"ID":"a541160c-9971-4969-9296-23972aaf6bbf","Type":"ContainerDied","Data":"859852c198f5f3f6fba179cf2d1f27465aaa13222a592f190933741620fbc7f6"} Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.041061 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.148647 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmxhm\" (UniqueName: \"kubernetes.io/projected/a541160c-9971-4969-9296-23972aaf6bbf-kube-api-access-mmxhm\") pod \"a541160c-9971-4969-9296-23972aaf6bbf\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.148685 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-ssh-key\") pod \"a541160c-9971-4969-9296-23972aaf6bbf\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.148765 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-repo-setup-combined-ca-bundle\") pod \"a541160c-9971-4969-9296-23972aaf6bbf\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.148800 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-inventory\") pod \"a541160c-9971-4969-9296-23972aaf6bbf\" (UID: \"a541160c-9971-4969-9296-23972aaf6bbf\") " Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.154304 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a541160c-9971-4969-9296-23972aaf6bbf-kube-api-access-mmxhm" (OuterVolumeSpecName: "kube-api-access-mmxhm") pod "a541160c-9971-4969-9296-23972aaf6bbf" (UID: "a541160c-9971-4969-9296-23972aaf6bbf"). InnerVolumeSpecName "kube-api-access-mmxhm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.157118 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "a541160c-9971-4969-9296-23972aaf6bbf" (UID: "a541160c-9971-4969-9296-23972aaf6bbf"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.175215 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-inventory" (OuterVolumeSpecName: "inventory") pod "a541160c-9971-4969-9296-23972aaf6bbf" (UID: "a541160c-9971-4969-9296-23972aaf6bbf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.176970 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a541160c-9971-4969-9296-23972aaf6bbf" (UID: "a541160c-9971-4969-9296-23972aaf6bbf"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.251034 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmxhm\" (UniqueName: \"kubernetes.io/projected/a541160c-9971-4969-9296-23972aaf6bbf-kube-api-access-mmxhm\") on node \"crc\" DevicePath \"\"" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.251063 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.251073 5133 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.251082 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a541160c-9971-4969-9296-23972aaf6bbf-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.588956 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" event={"ID":"a541160c-9971-4969-9296-23972aaf6bbf","Type":"ContainerDied","Data":"f6098bda21dbffa94ce7d5462d2f4d48c098e88e0a6dd2e70232e60291395b9b"} Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.589255 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6098bda21dbffa94ce7d5462d2f4d48c098e88e0a6dd2e70232e60291395b9b" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.589084 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.698471 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx"] Nov 21 14:09:02 crc kubenswrapper[5133]: E1121 14:09:02.698870 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerName="extract-content" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.698883 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerName="extract-content" Nov 21 14:09:02 crc kubenswrapper[5133]: E1121 14:09:02.698900 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerName="registry-server" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.698906 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerName="registry-server" Nov 21 14:09:02 crc kubenswrapper[5133]: E1121 14:09:02.698914 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a541160c-9971-4969-9296-23972aaf6bbf" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.698924 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a541160c-9971-4969-9296-23972aaf6bbf" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 14:09:02 crc kubenswrapper[5133]: E1121 14:09:02.698948 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerName="extract-utilities" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.698955 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerName="extract-utilities" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.699127 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="502ec960-5d5e-4019-8c44-72b51fd5867d" containerName="registry-server" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.699144 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a541160c-9971-4969-9296-23972aaf6bbf" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.699678 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.703256 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.703372 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.703452 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.703667 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.719288 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx"] Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.761253 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8pqv\" (UniqueName: \"kubernetes.io/projected/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-kube-api-access-g8pqv\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.761374 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.761416 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.761458 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.863083 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.863146 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-bootstrap-combined-ca-bundle\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.863176 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.863245 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8pqv\" (UniqueName: \"kubernetes.io/projected/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-kube-api-access-g8pqv\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.868461 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.868740 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.877950 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:02 crc kubenswrapper[5133]: I1121 14:09:02.886174 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8pqv\" (UniqueName: \"kubernetes.io/projected/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-kube-api-access-g8pqv\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:03 crc kubenswrapper[5133]: I1121 14:09:03.014972 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:09:03 crc kubenswrapper[5133]: I1121 14:09:03.453527 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx"] Nov 21 14:09:03 crc kubenswrapper[5133]: W1121 14:09:03.456085 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2eb306a_ed7a_4481_ae0f_5dd3c3046d81.slice/crio-c4ce289da2e9f36d5148969523d7ba94f3b4e55d93d61cf51b0ea3d6e1daca93 WatchSource:0}: Error finding container c4ce289da2e9f36d5148969523d7ba94f3b4e55d93d61cf51b0ea3d6e1daca93: Status 404 returned error can't find the container with id c4ce289da2e9f36d5148969523d7ba94f3b4e55d93d61cf51b0ea3d6e1daca93 Nov 21 14:09:03 crc kubenswrapper[5133]: I1121 14:09:03.596937 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" event={"ID":"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81","Type":"ContainerStarted","Data":"c4ce289da2e9f36d5148969523d7ba94f3b4e55d93d61cf51b0ea3d6e1daca93"} Nov 21 14:09:05 crc kubenswrapper[5133]: I1121 14:09:05.640667 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" event={"ID":"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81","Type":"ContainerStarted","Data":"cbd66e4f5fe271e3ad76ab9c4422dba23228103a77ba7e97e882f314be1db169"} Nov 21 14:09:05 crc kubenswrapper[5133]: I1121 14:09:05.669531 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" podStartSLOduration=2.617109047 podStartE2EDuration="3.669487449s" podCreationTimestamp="2025-11-21 14:09:02 +0000 UTC" firstStartedPulling="2025-11-21 14:09:03.458526627 +0000 UTC m=+1603.256358865" lastFinishedPulling="2025-11-21 14:09:04.510905019 +0000 UTC m=+1604.308737267" observedRunningTime="2025-11-21 14:09:05.665187414 +0000 UTC m=+1605.463019702" watchObservedRunningTime="2025-11-21 14:09:05.669487449 +0000 UTC m=+1605.467319697" Nov 21 14:09:06 crc kubenswrapper[5133]: I1121 14:09:06.883736 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tzjcp"] Nov 21 14:09:06 crc kubenswrapper[5133]: I1121 14:09:06.887023 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:06 crc kubenswrapper[5133]: I1121 14:09:06.921890 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tzjcp"] Nov 21 14:09:06 crc kubenswrapper[5133]: I1121 14:09:06.964416 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8597\" (UniqueName: \"kubernetes.io/projected/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-kube-api-access-t8597\") pod \"redhat-marketplace-tzjcp\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:06 crc kubenswrapper[5133]: I1121 14:09:06.964954 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-utilities\") pod \"redhat-marketplace-tzjcp\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:06 crc kubenswrapper[5133]: I1121 14:09:06.965059 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-catalog-content\") pod \"redhat-marketplace-tzjcp\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:07 crc kubenswrapper[5133]: I1121 14:09:07.067142 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-utilities\") pod \"redhat-marketplace-tzjcp\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:07 crc kubenswrapper[5133]: I1121 14:09:07.067200 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-catalog-content\") pod \"redhat-marketplace-tzjcp\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:07 crc kubenswrapper[5133]: I1121 14:09:07.067272 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8597\" (UniqueName: \"kubernetes.io/projected/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-kube-api-access-t8597\") pod \"redhat-marketplace-tzjcp\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:07 crc kubenswrapper[5133]: I1121 14:09:07.067730 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-utilities\") pod \"redhat-marketplace-tzjcp\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:07 crc kubenswrapper[5133]: I1121 14:09:07.068075 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-catalog-content\") pod \"redhat-marketplace-tzjcp\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:07 crc kubenswrapper[5133]: I1121 14:09:07.098388 5133 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-t8597\" (UniqueName: \"kubernetes.io/projected/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-kube-api-access-t8597\") pod \"redhat-marketplace-tzjcp\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:07 crc kubenswrapper[5133]: I1121 14:09:07.254041 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:07 crc kubenswrapper[5133]: I1121 14:09:07.528907 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tzjcp"] Nov 21 14:09:07 crc kubenswrapper[5133]: I1121 14:09:07.659858 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tzjcp" event={"ID":"e24ebbe8-70f0-41d4-85b5-25c57f51c04b","Type":"ContainerStarted","Data":"1306b90a9c94bee5e333a5fb5e8e8f9d8500ed94c76e877632af7932255d7069"} Nov 21 14:09:08 crc kubenswrapper[5133]: I1121 14:09:08.675073 5133 generic.go:334] "Generic (PLEG): container finished" podID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerID="3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58" exitCode=0 Nov 21 14:09:08 crc kubenswrapper[5133]: I1121 14:09:08.675155 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tzjcp" event={"ID":"e24ebbe8-70f0-41d4-85b5-25c57f51c04b","Type":"ContainerDied","Data":"3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58"} Nov 21 14:09:09 crc kubenswrapper[5133]: I1121 14:09:09.690994 5133 generic.go:334] "Generic (PLEG): container finished" podID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerID="47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27" exitCode=0 Nov 21 14:09:09 crc kubenswrapper[5133]: I1121 14:09:09.691191 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tzjcp" event={"ID":"e24ebbe8-70f0-41d4-85b5-25c57f51c04b","Type":"ContainerDied","Data":"47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27"} Nov 21 14:09:10 crc kubenswrapper[5133]: I1121 14:09:10.702901 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tzjcp" event={"ID":"e24ebbe8-70f0-41d4-85b5-25c57f51c04b","Type":"ContainerStarted","Data":"4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9"} Nov 21 14:09:10 crc kubenswrapper[5133]: I1121 14:09:10.721964 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tzjcp" podStartSLOduration=3.023613639 podStartE2EDuration="4.721939513s" podCreationTimestamp="2025-11-21 14:09:06 +0000 UTC" firstStartedPulling="2025-11-21 14:09:08.679483289 +0000 UTC m=+1608.477315577" lastFinishedPulling="2025-11-21 14:09:10.377809203 +0000 UTC m=+1610.175641451" observedRunningTime="2025-11-21 14:09:10.719439576 +0000 UTC m=+1610.517271824" watchObservedRunningTime="2025-11-21 14:09:10.721939513 +0000 UTC m=+1610.519771761" Nov 21 14:09:17 crc kubenswrapper[5133]: I1121 14:09:17.254564 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:17 crc kubenswrapper[5133]: I1121 14:09:17.255322 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:17 crc kubenswrapper[5133]: I1121 14:09:17.343588 5133 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:17 crc kubenswrapper[5133]: I1121 14:09:17.877341 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:17 crc kubenswrapper[5133]: I1121 14:09:17.946701 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tzjcp"] Nov 21 14:09:19 crc kubenswrapper[5133]: I1121 14:09:19.819075 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tzjcp" podUID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerName="registry-server" containerID="cri-o://4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9" gracePeriod=2 Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.423020 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.459845 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-catalog-content\") pod \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.460248 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8597\" (UniqueName: \"kubernetes.io/projected/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-kube-api-access-t8597\") pod \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.460327 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-utilities\") pod \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\" (UID: \"e24ebbe8-70f0-41d4-85b5-25c57f51c04b\") " Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.462118 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-utilities" (OuterVolumeSpecName: "utilities") pod "e24ebbe8-70f0-41d4-85b5-25c57f51c04b" (UID: "e24ebbe8-70f0-41d4-85b5-25c57f51c04b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.480259 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-kube-api-access-t8597" (OuterVolumeSpecName: "kube-api-access-t8597") pod "e24ebbe8-70f0-41d4-85b5-25c57f51c04b" (UID: "e24ebbe8-70f0-41d4-85b5-25c57f51c04b"). InnerVolumeSpecName "kube-api-access-t8597". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.511460 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e24ebbe8-70f0-41d4-85b5-25c57f51c04b" (UID: "e24ebbe8-70f0-41d4-85b5-25c57f51c04b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.562614 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8597\" (UniqueName: \"kubernetes.io/projected/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-kube-api-access-t8597\") on node \"crc\" DevicePath \"\"" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.562664 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.562674 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e24ebbe8-70f0-41d4-85b5-25c57f51c04b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.832388 5133 generic.go:334] "Generic (PLEG): container finished" podID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerID="4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9" exitCode=0 Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.832487 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tzjcp" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.832525 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tzjcp" event={"ID":"e24ebbe8-70f0-41d4-85b5-25c57f51c04b","Type":"ContainerDied","Data":"4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9"} Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.832925 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tzjcp" event={"ID":"e24ebbe8-70f0-41d4-85b5-25c57f51c04b","Type":"ContainerDied","Data":"1306b90a9c94bee5e333a5fb5e8e8f9d8500ed94c76e877632af7932255d7069"} Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.832960 5133 scope.go:117] "RemoveContainer" containerID="4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.855760 5133 scope.go:117] "RemoveContainer" containerID="47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.876030 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tzjcp"] Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.884820 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tzjcp"] Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.890466 5133 scope.go:117] "RemoveContainer" containerID="3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.924827 5133 scope.go:117] "RemoveContainer" containerID="4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9" Nov 21 14:09:20 crc kubenswrapper[5133]: E1121 14:09:20.925419 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9\": container with ID starting with 4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9 not found: ID does not exist" containerID="4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.925498 5133 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9"} err="failed to get container status \"4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9\": rpc error: code = NotFound desc = could not find container \"4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9\": container with ID starting with 4f847bb3ea747bd3696606d073640a2ee97a31fe47d4f1010c717d18b60ed2f9 not found: ID does not exist" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.925556 5133 scope.go:117] "RemoveContainer" containerID="47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27" Nov 21 14:09:20 crc kubenswrapper[5133]: E1121 14:09:20.926042 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27\": container with ID starting with 47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27 not found: ID does not exist" containerID="47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.926073 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27"} err="failed to get container status \"47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27\": rpc error: code = NotFound desc = could not find container \"47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27\": container with ID starting with 47280f28d0e4f9a3eb34f5b174fade00b0169e351973a219321835d9bea36a27 not found: ID does not exist" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.926094 5133 scope.go:117] "RemoveContainer" containerID="3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58" Nov 21 14:09:20 crc kubenswrapper[5133]: E1121 14:09:20.926392 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58\": container with ID starting with 3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58 not found: ID does not exist" containerID="3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58" Nov 21 14:09:20 crc kubenswrapper[5133]: I1121 14:09:20.926429 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58"} err="failed to get container status \"3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58\": rpc error: code = NotFound desc = could not find container \"3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58\": container with ID starting with 3fa5921c94bdcb029afdc2041056f2a787374efea8c4da69f675b2bb37070e58 not found: ID does not exist" Nov 21 14:09:22 crc kubenswrapper[5133]: I1121 14:09:22.470237 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" path="/var/lib/kubelet/pods/e24ebbe8-70f0-41d4-85b5-25c57f51c04b/volumes" Nov 21 14:09:23 crc kubenswrapper[5133]: I1121 14:09:23.311801 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:09:23 crc kubenswrapper[5133]: I1121 14:09:23.311892 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:09:34 crc kubenswrapper[5133]: I1121 14:09:34.810717 5133 scope.go:117] "RemoveContainer" containerID="1dbbb3ff6d6a17bec5483a3a76d5b864f4fd3de1c63597309c4c89c6ebee9b7d" Nov 21 14:09:53 crc kubenswrapper[5133]: I1121 14:09:53.311466 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:09:53 crc kubenswrapper[5133]: I1121 14:09:53.312592 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:09:53 crc kubenswrapper[5133]: I1121 14:09:53.312672 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:09:53 crc kubenswrapper[5133]: I1121 14:09:53.314849 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:09:53 crc kubenswrapper[5133]: I1121 14:09:53.315168 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" gracePeriod=600 Nov 21 14:09:53 crc kubenswrapper[5133]: E1121 14:09:53.469580 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:09:54 crc kubenswrapper[5133]: I1121 14:09:54.223890 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" exitCode=0 Nov 21 14:09:54 crc kubenswrapper[5133]: I1121 14:09:54.223983 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77"} Nov 21 14:09:54 crc kubenswrapper[5133]: I1121 
14:09:54.224830 5133 scope.go:117] "RemoveContainer" containerID="edc5f6668b324bafe1f33484d097fa3472daf7be070419d53434fd44d4c45cd9" Nov 21 14:09:54 crc kubenswrapper[5133]: I1121 14:09:54.227963 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:09:54 crc kubenswrapper[5133]: E1121 14:09:54.228592 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:10:05 crc kubenswrapper[5133]: I1121 14:10:05.459597 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:10:05 crc kubenswrapper[5133]: E1121 14:10:05.460741 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:10:19 crc kubenswrapper[5133]: I1121 14:10:19.459176 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:10:19 crc kubenswrapper[5133]: E1121 14:10:19.460322 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:10:34 crc kubenswrapper[5133]: I1121 14:10:34.458148 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:10:34 crc kubenswrapper[5133]: E1121 14:10:34.459382 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:10:34 crc kubenswrapper[5133]: I1121 14:10:34.949622 5133 scope.go:117] "RemoveContainer" containerID="4a5564c86af0c6c595c719fb4772e5581e5b38cf3a2321ee5521d1dbeee04cca" Nov 21 14:10:34 crc kubenswrapper[5133]: I1121 14:10:34.981695 5133 scope.go:117] "RemoveContainer" containerID="aef9c7821dcd9e335b937f09586778fdd2db25d1a69ba8a1cd62a5b313a55c27" Nov 21 14:10:35 crc kubenswrapper[5133]: I1121 14:10:35.050270 5133 scope.go:117] "RemoveContainer" containerID="32d4bf4981adfaeaf3086c173849e5a983169c9fd6b11609978a9f53c1f664d9" Nov 21 14:10:48 crc kubenswrapper[5133]: I1121 14:10:48.458439 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:10:48 
crc kubenswrapper[5133]: E1121 14:10:48.459501 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:11:00 crc kubenswrapper[5133]: I1121 14:11:00.457915 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:11:00 crc kubenswrapper[5133]: E1121 14:11:00.459266 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:11:12 crc kubenswrapper[5133]: I1121 14:11:12.463413 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:11:12 crc kubenswrapper[5133]: E1121 14:11:12.464226 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:11:24 crc kubenswrapper[5133]: I1121 14:11:24.458349 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:11:24 crc kubenswrapper[5133]: E1121 14:11:24.460074 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:11:35 crc kubenswrapper[5133]: I1121 14:11:35.161631 5133 scope.go:117] "RemoveContainer" containerID="aaf6cbe432d492391858df4add3481ecbb299d8cb9f0960501f0f1e9e3a0b68a" Nov 21 14:11:35 crc kubenswrapper[5133]: I1121 14:11:35.210041 5133 scope.go:117] "RemoveContainer" containerID="614cefaf7e9736265ec1c4efaaea778369e6944c8be9c665fe3c9a6ba80a94ca" Nov 21 14:11:35 crc kubenswrapper[5133]: I1121 14:11:35.249436 5133 scope.go:117] "RemoveContainer" containerID="d607039cd6e336e626c7f791fa4173ea4eb51388ee4732d871920d842ca51da0" Nov 21 14:11:35 crc kubenswrapper[5133]: I1121 14:11:35.284204 5133 scope.go:117] "RemoveContainer" containerID="4aa96e95e574b8b2ba92c0677b3baeb7bf0aec250e6b6df4d57d2458805e43b6" Nov 21 14:11:37 crc kubenswrapper[5133]: I1121 14:11:37.457624 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:11:37 crc kubenswrapper[5133]: E1121 14:11:37.458250 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:11:50 crc kubenswrapper[5133]: I1121 14:11:50.458659 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:11:50 crc kubenswrapper[5133]: E1121 14:11:50.459805 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:12:03 crc kubenswrapper[5133]: I1121 14:12:03.458965 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:12:03 crc kubenswrapper[5133]: E1121 14:12:03.460337 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:12:16 crc kubenswrapper[5133]: I1121 14:12:16.459100 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:12:16 crc kubenswrapper[5133]: E1121 14:12:16.460195 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:12:30 crc kubenswrapper[5133]: I1121 14:12:30.458611 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:12:30 crc kubenswrapper[5133]: E1121 14:12:30.459775 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:12:38 crc kubenswrapper[5133]: I1121 14:12:38.219250 5133 generic.go:334] "Generic (PLEG): container finished" podID="a2eb306a-ed7a-4481-ae0f-5dd3c3046d81" containerID="cbd66e4f5fe271e3ad76ab9c4422dba23228103a77ba7e97e882f314be1db169" exitCode=0 Nov 21 14:12:38 crc kubenswrapper[5133]: I1121 14:12:38.219411 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" 
event={"ID":"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81","Type":"ContainerDied","Data":"cbd66e4f5fe271e3ad76ab9c4422dba23228103a77ba7e97e882f314be1db169"} Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.713886 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.815121 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-bootstrap-combined-ca-bundle\") pod \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.815293 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-ssh-key\") pod \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.815320 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-inventory\") pod \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.815505 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8pqv\" (UniqueName: \"kubernetes.io/projected/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-kube-api-access-g8pqv\") pod \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\" (UID: \"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81\") " Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.820989 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-kube-api-access-g8pqv" (OuterVolumeSpecName: "kube-api-access-g8pqv") pod "a2eb306a-ed7a-4481-ae0f-5dd3c3046d81" (UID: "a2eb306a-ed7a-4481-ae0f-5dd3c3046d81"). InnerVolumeSpecName "kube-api-access-g8pqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.821325 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a2eb306a-ed7a-4481-ae0f-5dd3c3046d81" (UID: "a2eb306a-ed7a-4481-ae0f-5dd3c3046d81"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.859444 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-inventory" (OuterVolumeSpecName: "inventory") pod "a2eb306a-ed7a-4481-ae0f-5dd3c3046d81" (UID: "a2eb306a-ed7a-4481-ae0f-5dd3c3046d81"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.867386 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a2eb306a-ed7a-4481-ae0f-5dd3c3046d81" (UID: "a2eb306a-ed7a-4481-ae0f-5dd3c3046d81"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.917335 5133 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.917382 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.917397 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:12:39 crc kubenswrapper[5133]: I1121 14:12:39.917409 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8pqv\" (UniqueName: \"kubernetes.io/projected/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81-kube-api-access-g8pqv\") on node \"crc\" DevicePath \"\"" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.247543 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" event={"ID":"a2eb306a-ed7a-4481-ae0f-5dd3c3046d81","Type":"ContainerDied","Data":"c4ce289da2e9f36d5148969523d7ba94f3b4e55d93d61cf51b0ea3d6e1daca93"} Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.247608 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4ce289da2e9f36d5148969523d7ba94f3b4e55d93d61cf51b0ea3d6e1daca93" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.247930 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.358391 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46"] Nov 21 14:12:40 crc kubenswrapper[5133]: E1121 14:12:40.358940 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerName="registry-server" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.358966 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerName="registry-server" Nov 21 14:12:40 crc kubenswrapper[5133]: E1121 14:12:40.358985 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerName="extract-utilities" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.359000 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerName="extract-utilities" Nov 21 14:12:40 crc kubenswrapper[5133]: E1121 14:12:40.359015 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerName="extract-content" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.359044 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerName="extract-content" Nov 21 14:12:40 crc kubenswrapper[5133]: E1121 14:12:40.359055 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2eb306a-ed7a-4481-ae0f-5dd3c3046d81" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.359067 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2eb306a-ed7a-4481-ae0f-5dd3c3046d81" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.359324 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="e24ebbe8-70f0-41d4-85b5-25c57f51c04b" containerName="registry-server" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.359366 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2eb306a-ed7a-4481-ae0f-5dd3c3046d81" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.360271 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.363334 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.363467 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.363666 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.363954 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.368591 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46"] Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.528242 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-4hk46\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.528331 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-4hk46\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.528508 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wdjv\" (UniqueName: \"kubernetes.io/projected/8300643f-e199-4e87-bee2-0d2e79fdf798-kube-api-access-7wdjv\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-4hk46\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.630096 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-4hk46\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.630167 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-4hk46\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.630214 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wdjv\" (UniqueName: \"kubernetes.io/projected/8300643f-e199-4e87-bee2-0d2e79fdf798-kube-api-access-7wdjv\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-4hk46\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.636091 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-4hk46\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.636251 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-4hk46\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.649236 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wdjv\" (UniqueName: \"kubernetes.io/projected/8300643f-e199-4e87-bee2-0d2e79fdf798-kube-api-access-7wdjv\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-4hk46\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:40 crc kubenswrapper[5133]: I1121 14:12:40.685906 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:12:41 crc kubenswrapper[5133]: I1121 14:12:41.275203 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46"] Nov 21 14:12:41 crc kubenswrapper[5133]: I1121 14:12:41.278815 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:12:42 crc kubenswrapper[5133]: I1121 14:12:42.266906 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" event={"ID":"8300643f-e199-4e87-bee2-0d2e79fdf798","Type":"ContainerStarted","Data":"fcfec1202350fee4a0f4eb21322dde6f71001844f038b4bcad89fdf33cc1798d"} Nov 21 14:12:42 crc kubenswrapper[5133]: I1121 14:12:42.267267 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" event={"ID":"8300643f-e199-4e87-bee2-0d2e79fdf798","Type":"ContainerStarted","Data":"78810a50b907dc5560bd684e04f85a8903acea15fabaf46aa575ae5e1aeba291"} Nov 21 14:12:42 crc kubenswrapper[5133]: I1121 14:12:42.309659 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" podStartSLOduration=1.758225075 podStartE2EDuration="2.309628301s" podCreationTimestamp="2025-11-21 14:12:40 +0000 UTC" firstStartedPulling="2025-11-21 14:12:41.278576009 +0000 UTC m=+1821.076408257" lastFinishedPulling="2025-11-21 14:12:41.829979195 +0000 UTC m=+1821.627811483" observedRunningTime="2025-11-21 14:12:42.292681788 +0000 UTC m=+1822.090514076" watchObservedRunningTime="2025-11-21 14:12:42.309628301 +0000 UTC m=+1822.107460559" Nov 21 14:12:44 crc kubenswrapper[5133]: I1121 14:12:44.458134 5133 scope.go:117] "RemoveContainer" 
containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:12:44 crc kubenswrapper[5133]: E1121 14:12:44.458695 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:12:56 crc kubenswrapper[5133]: I1121 14:12:56.061906 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-091f-account-create-m48fq"] Nov 21 14:12:56 crc kubenswrapper[5133]: I1121 14:12:56.071774 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-091f-account-create-m48fq"] Nov 21 14:12:56 crc kubenswrapper[5133]: I1121 14:12:56.467275 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baacbad7-4746-4ce7-92a6-c820bdd0a2ac" path="/var/lib/kubelet/pods/baacbad7-4746-4ce7-92a6-c820bdd0a2ac/volumes" Nov 21 14:12:57 crc kubenswrapper[5133]: I1121 14:12:57.040451 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-jb7qk"] Nov 21 14:12:57 crc kubenswrapper[5133]: I1121 14:12:57.050579 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-jb7qk"] Nov 21 14:12:57 crc kubenswrapper[5133]: I1121 14:12:57.457887 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:12:57 crc kubenswrapper[5133]: E1121 14:12:57.458880 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:12:58 crc kubenswrapper[5133]: I1121 14:12:58.041106 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-01ea-account-create-5bjqm"] Nov 21 14:12:58 crc kubenswrapper[5133]: I1121 14:12:58.056850 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-9z6cc"] Nov 21 14:12:58 crc kubenswrapper[5133]: I1121 14:12:58.073227 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-01ea-account-create-5bjqm"] Nov 21 14:12:58 crc kubenswrapper[5133]: I1121 14:12:58.082422 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-9z6cc"] Nov 21 14:12:58 crc kubenswrapper[5133]: I1121 14:12:58.468585 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1633232-e2fd-4172-b929-22af732c2a8f" path="/var/lib/kubelet/pods/b1633232-e2fd-4172-b929-22af732c2a8f/volumes" Nov 21 14:12:58 crc kubenswrapper[5133]: I1121 14:12:58.469400 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b48cb16b-2e16-4f3e-a083-e421d2ad2930" path="/var/lib/kubelet/pods/b48cb16b-2e16-4f3e-a083-e421d2ad2930/volumes" Nov 21 14:12:58 crc kubenswrapper[5133]: I1121 14:12:58.470210 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edcec591-6e27-4287-abdf-71dd047698a9" 
path="/var/lib/kubelet/pods/edcec591-6e27-4287-abdf-71dd047698a9/volumes" Nov 21 14:13:04 crc kubenswrapper[5133]: I1121 14:13:04.029948 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-lrrtw"] Nov 21 14:13:04 crc kubenswrapper[5133]: I1121 14:13:04.038163 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-52d3-account-create-7b77h"] Nov 21 14:13:04 crc kubenswrapper[5133]: I1121 14:13:04.047606 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-52d3-account-create-7b77h"] Nov 21 14:13:04 crc kubenswrapper[5133]: I1121 14:13:04.056787 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-lrrtw"] Nov 21 14:13:04 crc kubenswrapper[5133]: I1121 14:13:04.468647 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c98f849-edf5-4072-a707-fa87dc3f3b61" path="/var/lib/kubelet/pods/0c98f849-edf5-4072-a707-fa87dc3f3b61/volumes" Nov 21 14:13:04 crc kubenswrapper[5133]: I1121 14:13:04.469241 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ede4b5ec-0692-4ab1-857d-df00dea3d1bf" path="/var/lib/kubelet/pods/ede4b5ec-0692-4ab1-857d-df00dea3d1bf/volumes" Nov 21 14:13:11 crc kubenswrapper[5133]: I1121 14:13:11.458673 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:13:11 crc kubenswrapper[5133]: E1121 14:13:11.459809 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:13:22 crc kubenswrapper[5133]: I1121 14:13:22.463737 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:13:22 crc kubenswrapper[5133]: E1121 14:13:22.464553 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.295508 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4wcn9"] Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.299150 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.326790 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4wcn9"] Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.423089 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-catalog-content\") pod \"redhat-operators-4wcn9\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.423140 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-js787\" (UniqueName: \"kubernetes.io/projected/de7f693a-429e-4ecd-9444-9c19b82e0a4c-kube-api-access-js787\") pod \"redhat-operators-4wcn9\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.423415 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-utilities\") pod \"redhat-operators-4wcn9\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.525903 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-catalog-content\") pod \"redhat-operators-4wcn9\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.525969 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-js787\" (UniqueName: \"kubernetes.io/projected/de7f693a-429e-4ecd-9444-9c19b82e0a4c-kube-api-access-js787\") pod \"redhat-operators-4wcn9\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.526102 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-utilities\") pod \"redhat-operators-4wcn9\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.527604 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-catalog-content\") pod \"redhat-operators-4wcn9\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.528120 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-utilities\") pod \"redhat-operators-4wcn9\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.559631 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-js787\" (UniqueName: \"kubernetes.io/projected/de7f693a-429e-4ecd-9444-9c19b82e0a4c-kube-api-access-js787\") pod \"redhat-operators-4wcn9\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.628059 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:33 crc kubenswrapper[5133]: I1121 14:13:33.952626 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4wcn9"] Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.042313 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-f69d-account-create-6ct9x"] Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.058246 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-fj9b9"] Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.069748 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-f69d-account-create-6ct9x"] Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.077520 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-fj9b9"] Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.457804 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:13:34 crc kubenswrapper[5133]: E1121 14:13:34.458467 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.466883 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43b30a4a-3bf5-4a77-9422-01336a998d47" path="/var/lib/kubelet/pods/43b30a4a-3bf5-4a77-9422-01336a998d47/volumes" Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.467727 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab41e086-5fe6-40db-9293-7f8e01f43b08" path="/var/lib/kubelet/pods/ab41e086-5fe6-40db-9293-7f8e01f43b08/volumes" Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.846889 5133 generic.go:334] "Generic (PLEG): container finished" podID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerID="3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50" exitCode=0 Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.846992 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4wcn9" event={"ID":"de7f693a-429e-4ecd-9444-9c19b82e0a4c","Type":"ContainerDied","Data":"3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50"} Nov 21 14:13:34 crc kubenswrapper[5133]: I1121 14:13:34.847267 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4wcn9" event={"ID":"de7f693a-429e-4ecd-9444-9c19b82e0a4c","Type":"ContainerStarted","Data":"b24b1423d1c568904555eca3ede6fea08d929dd6ab9938cc61898402c37d4b03"} Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.058501 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-ghlcg"] Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 
14:13:35.069865 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6888-account-create-sk6x7"] Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.079737 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-18e5-account-create-rmg6q"] Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.088989 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-7vjhl"] Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.098955 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-ghlcg"] Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.107449 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6888-account-create-sk6x7"] Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.113423 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-18e5-account-create-rmg6q"] Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.119497 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-7vjhl"] Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.390093 5133 scope.go:117] "RemoveContainer" containerID="fc48dddf1308fed80597b3816c44b9c98a5812e090a4b8acafd9ca3f4b61a18d" Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.410740 5133 scope.go:117] "RemoveContainer" containerID="51474f06ea677383540766f25941905209f4a26d9fc93361b532f2473b8e534b" Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.461131 5133 scope.go:117] "RemoveContainer" containerID="7f3a6b02b119aedbd74ffd4430252a5eada59d520b2b664e9a776cae08326942" Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.540985 5133 scope.go:117] "RemoveContainer" containerID="da5ed15d2c49c142e0474a16aa74fa58366bf518a297597d2b94ef2b43e9a159" Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.580956 5133 scope.go:117] "RemoveContainer" containerID="82d4bdea738c4b67f9b4bc1a3b9e96a52ba47d5f691097cd4ff760c1a2c5a272" Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.628739 5133 scope.go:117] "RemoveContainer" containerID="b65f98abb1a499f4f403b4c63640a574e05086e10dcc6a467bc4331c9e681552" Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.667868 5133 scope.go:117] "RemoveContainer" containerID="4c54d1c5b88d2133b7f2fe3ad66be6d7a5f34ff7941f062ce9351b4dd06e640b" Nov 21 14:13:35 crc kubenswrapper[5133]: I1121 14:13:35.688750 5133 scope.go:117] "RemoveContainer" containerID="3e625539d166fa20ea2699dbd1573ac0028ad111713f065c62258f9e32a9801d" Nov 21 14:13:36 crc kubenswrapper[5133]: I1121 14:13:36.469963 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d89170b-8c95-42fa-90bd-35edc597fc2f" path="/var/lib/kubelet/pods/2d89170b-8c95-42fa-90bd-35edc597fc2f/volumes" Nov 21 14:13:36 crc kubenswrapper[5133]: I1121 14:13:36.471089 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a35d3d79-1e17-4750-9246-57363851ddd0" path="/var/lib/kubelet/pods/a35d3d79-1e17-4750-9246-57363851ddd0/volumes" Nov 21 14:13:36 crc kubenswrapper[5133]: I1121 14:13:36.471908 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b48c94e4-c8f8-42fb-90b3-d300991ac5d6" path="/var/lib/kubelet/pods/b48c94e4-c8f8-42fb-90b3-d300991ac5d6/volumes" Nov 21 14:13:36 crc kubenswrapper[5133]: I1121 14:13:36.472650 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d32a6944-2567-4e79-9fee-d0aecb16f40e" path="/var/lib/kubelet/pods/d32a6944-2567-4e79-9fee-d0aecb16f40e/volumes" Nov 21 14:13:36 crc 
kubenswrapper[5133]: I1121 14:13:36.872540 5133 generic.go:334] "Generic (PLEG): container finished" podID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerID="f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a" exitCode=0 Nov 21 14:13:36 crc kubenswrapper[5133]: I1121 14:13:36.872879 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4wcn9" event={"ID":"de7f693a-429e-4ecd-9444-9c19b82e0a4c","Type":"ContainerDied","Data":"f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a"} Nov 21 14:13:40 crc kubenswrapper[5133]: I1121 14:13:40.929850 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4wcn9" event={"ID":"de7f693a-429e-4ecd-9444-9c19b82e0a4c","Type":"ContainerStarted","Data":"614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e"} Nov 21 14:13:40 crc kubenswrapper[5133]: I1121 14:13:40.953148 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4wcn9" podStartSLOduration=3.963745842 podStartE2EDuration="7.953126656s" podCreationTimestamp="2025-11-21 14:13:33 +0000 UTC" firstStartedPulling="2025-11-21 14:13:34.848852832 +0000 UTC m=+1874.646685080" lastFinishedPulling="2025-11-21 14:13:38.838233646 +0000 UTC m=+1878.636065894" observedRunningTime="2025-11-21 14:13:40.948218904 +0000 UTC m=+1880.746051182" watchObservedRunningTime="2025-11-21 14:13:40.953126656 +0000 UTC m=+1880.750958904" Nov 21 14:13:43 crc kubenswrapper[5133]: I1121 14:13:43.045271 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-tshmb"] Nov 21 14:13:43 crc kubenswrapper[5133]: I1121 14:13:43.057929 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-tshmb"] Nov 21 14:13:43 crc kubenswrapper[5133]: I1121 14:13:43.628810 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:43 crc kubenswrapper[5133]: I1121 14:13:43.628886 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:44 crc kubenswrapper[5133]: I1121 14:13:44.472584 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12e06aee-ac34-4d50-aa07-42f326d55fb2" path="/var/lib/kubelet/pods/12e06aee-ac34-4d50-aa07-42f326d55fb2/volumes" Nov 21 14:13:44 crc kubenswrapper[5133]: I1121 14:13:44.696408 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4wcn9" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerName="registry-server" probeResult="failure" output=< Nov 21 14:13:44 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 14:13:44 crc kubenswrapper[5133]: > Nov 21 14:13:46 crc kubenswrapper[5133]: I1121 14:13:46.458653 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:13:46 crc kubenswrapper[5133]: E1121 14:13:46.460388 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:13:53 crc 
kubenswrapper[5133]: I1121 14:13:53.681483 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:53 crc kubenswrapper[5133]: I1121 14:13:53.763372 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:53 crc kubenswrapper[5133]: I1121 14:13:53.933780 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4wcn9"] Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.096590 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4wcn9" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerName="registry-server" containerID="cri-o://614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e" gracePeriod=2 Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.560447 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.671875 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-catalog-content\") pod \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.672173 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-js787\" (UniqueName: \"kubernetes.io/projected/de7f693a-429e-4ecd-9444-9c19b82e0a4c-kube-api-access-js787\") pod \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.672349 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-utilities\") pod \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\" (UID: \"de7f693a-429e-4ecd-9444-9c19b82e0a4c\") " Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.673851 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-utilities" (OuterVolumeSpecName: "utilities") pod "de7f693a-429e-4ecd-9444-9c19b82e0a4c" (UID: "de7f693a-429e-4ecd-9444-9c19b82e0a4c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.681434 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de7f693a-429e-4ecd-9444-9c19b82e0a4c-kube-api-access-js787" (OuterVolumeSpecName: "kube-api-access-js787") pod "de7f693a-429e-4ecd-9444-9c19b82e0a4c" (UID: "de7f693a-429e-4ecd-9444-9c19b82e0a4c"). InnerVolumeSpecName "kube-api-access-js787". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.770331 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de7f693a-429e-4ecd-9444-9c19b82e0a4c" (UID: "de7f693a-429e-4ecd-9444-9c19b82e0a4c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.775378 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.775422 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de7f693a-429e-4ecd-9444-9c19b82e0a4c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:13:55 crc kubenswrapper[5133]: I1121 14:13:55.775445 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-js787\" (UniqueName: \"kubernetes.io/projected/de7f693a-429e-4ecd-9444-9c19b82e0a4c-kube-api-access-js787\") on node \"crc\" DevicePath \"\"" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.116489 5133 generic.go:334] "Generic (PLEG): container finished" podID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerID="614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e" exitCode=0 Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.116547 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4wcn9" event={"ID":"de7f693a-429e-4ecd-9444-9c19b82e0a4c","Type":"ContainerDied","Data":"614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e"} Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.116623 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4wcn9" event={"ID":"de7f693a-429e-4ecd-9444-9c19b82e0a4c","Type":"ContainerDied","Data":"b24b1423d1c568904555eca3ede6fea08d929dd6ab9938cc61898402c37d4b03"} Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.116627 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4wcn9" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.116656 5133 scope.go:117] "RemoveContainer" containerID="614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.164854 5133 scope.go:117] "RemoveContainer" containerID="f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.198161 5133 scope.go:117] "RemoveContainer" containerID="3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.199717 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4wcn9"] Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.212182 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4wcn9"] Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.249557 5133 scope.go:117] "RemoveContainer" containerID="614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e" Nov 21 14:13:56 crc kubenswrapper[5133]: E1121 14:13:56.250061 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e\": container with ID starting with 614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e not found: ID does not exist" containerID="614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.250097 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e"} err="failed to get container status \"614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e\": rpc error: code = NotFound desc = could not find container \"614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e\": container with ID starting with 614e62dee8cded38b8a0d2e80bdf8b78ddb20ab1a8437611b465abd21bd7274e not found: ID does not exist" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.250125 5133 scope.go:117] "RemoveContainer" containerID="f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a" Nov 21 14:13:56 crc kubenswrapper[5133]: E1121 14:13:56.250513 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a\": container with ID starting with f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a not found: ID does not exist" containerID="f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.250545 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a"} err="failed to get container status \"f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a\": rpc error: code = NotFound desc = could not find container \"f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a\": container with ID starting with f9fb6864f8d8e8d1d7b73495e6ea9866152ea0d2b9f857d5cbe150588bcc703a not found: ID does not exist" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.250563 5133 scope.go:117] "RemoveContainer" 
containerID="3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50" Nov 21 14:13:56 crc kubenswrapper[5133]: E1121 14:13:56.250844 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50\": container with ID starting with 3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50 not found: ID does not exist" containerID="3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.250876 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50"} err="failed to get container status \"3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50\": rpc error: code = NotFound desc = could not find container \"3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50\": container with ID starting with 3d541f87a3c5cd3acd70e39a21335bca090769e6e3116b51a86842e33d334a50 not found: ID does not exist" Nov 21 14:13:56 crc kubenswrapper[5133]: I1121 14:13:56.468406 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" path="/var/lib/kubelet/pods/de7f693a-429e-4ecd-9444-9c19b82e0a4c/volumes" Nov 21 14:14:00 crc kubenswrapper[5133]: I1121 14:14:00.460090 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:14:00 crc kubenswrapper[5133]: E1121 14:14:00.460873 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:14:02 crc kubenswrapper[5133]: I1121 14:14:02.182989 5133 generic.go:334] "Generic (PLEG): container finished" podID="8300643f-e199-4e87-bee2-0d2e79fdf798" containerID="fcfec1202350fee4a0f4eb21322dde6f71001844f038b4bcad89fdf33cc1798d" exitCode=0 Nov 21 14:14:02 crc kubenswrapper[5133]: I1121 14:14:02.183035 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" event={"ID":"8300643f-e199-4e87-bee2-0d2e79fdf798","Type":"ContainerDied","Data":"fcfec1202350fee4a0f4eb21322dde6f71001844f038b4bcad89fdf33cc1798d"} Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.670044 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.735955 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-ssh-key\") pod \"8300643f-e199-4e87-bee2-0d2e79fdf798\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.736160 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wdjv\" (UniqueName: \"kubernetes.io/projected/8300643f-e199-4e87-bee2-0d2e79fdf798-kube-api-access-7wdjv\") pod \"8300643f-e199-4e87-bee2-0d2e79fdf798\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.736222 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-inventory\") pod \"8300643f-e199-4e87-bee2-0d2e79fdf798\" (UID: \"8300643f-e199-4e87-bee2-0d2e79fdf798\") " Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.741382 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8300643f-e199-4e87-bee2-0d2e79fdf798-kube-api-access-7wdjv" (OuterVolumeSpecName: "kube-api-access-7wdjv") pod "8300643f-e199-4e87-bee2-0d2e79fdf798" (UID: "8300643f-e199-4e87-bee2-0d2e79fdf798"). InnerVolumeSpecName "kube-api-access-7wdjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.764547 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8300643f-e199-4e87-bee2-0d2e79fdf798" (UID: "8300643f-e199-4e87-bee2-0d2e79fdf798"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.774162 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-inventory" (OuterVolumeSpecName: "inventory") pod "8300643f-e199-4e87-bee2-0d2e79fdf798" (UID: "8300643f-e199-4e87-bee2-0d2e79fdf798"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.839320 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.839509 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wdjv\" (UniqueName: \"kubernetes.io/projected/8300643f-e199-4e87-bee2-0d2e79fdf798-kube-api-access-7wdjv\") on node \"crc\" DevicePath \"\"" Nov 21 14:14:03 crc kubenswrapper[5133]: I1121 14:14:03.839598 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8300643f-e199-4e87-bee2-0d2e79fdf798-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.206594 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" event={"ID":"8300643f-e199-4e87-bee2-0d2e79fdf798","Type":"ContainerDied","Data":"78810a50b907dc5560bd684e04f85a8903acea15fabaf46aa575ae5e1aeba291"} Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.206646 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78810a50b907dc5560bd684e04f85a8903acea15fabaf46aa575ae5e1aeba291" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.206680 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.306556 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr"] Nov 21 14:14:04 crc kubenswrapper[5133]: E1121 14:14:04.306921 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerName="extract-content" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.306933 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerName="extract-content" Nov 21 14:14:04 crc kubenswrapper[5133]: E1121 14:14:04.306944 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8300643f-e199-4e87-bee2-0d2e79fdf798" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.306951 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8300643f-e199-4e87-bee2-0d2e79fdf798" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:14:04 crc kubenswrapper[5133]: E1121 14:14:04.306964 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerName="registry-server" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.306969 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerName="registry-server" Nov 21 14:14:04 crc kubenswrapper[5133]: E1121 14:14:04.306984 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerName="extract-utilities" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.306990 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerName="extract-utilities" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.307144 5133 
memory_manager.go:354] "RemoveStaleState removing state" podUID="8300643f-e199-4e87-bee2-0d2e79fdf798" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.307172 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="de7f693a-429e-4ecd-9444-9c19b82e0a4c" containerName="registry-server" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.307732 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.311453 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.312188 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.314041 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.314857 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.317539 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr"] Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.351916 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.352368 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.352404 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjqnv\" (UniqueName: \"kubernetes.io/projected/50083ce9-d8d7-40f9-aceb-4fd801670062-kube-api-access-tjqnv\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.453779 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.453895 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.453936 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjqnv\" (UniqueName: \"kubernetes.io/projected/50083ce9-d8d7-40f9-aceb-4fd801670062-kube-api-access-tjqnv\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.458631 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.460549 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.472884 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjqnv\" (UniqueName: \"kubernetes.io/projected/50083ce9-d8d7-40f9-aceb-4fd801670062-kube-api-access-tjqnv\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:04 crc kubenswrapper[5133]: I1121 14:14:04.638602 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:05 crc kubenswrapper[5133]: I1121 14:14:05.133782 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr"] Nov 21 14:14:05 crc kubenswrapper[5133]: W1121 14:14:05.140888 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50083ce9_d8d7_40f9_aceb_4fd801670062.slice/crio-e030f120282e3a0e17f31c04c45071228e99fb9d82e87b46482e3a075369cba2 WatchSource:0}: Error finding container e030f120282e3a0e17f31c04c45071228e99fb9d82e87b46482e3a075369cba2: Status 404 returned error can't find the container with id e030f120282e3a0e17f31c04c45071228e99fb9d82e87b46482e3a075369cba2 Nov 21 14:14:05 crc kubenswrapper[5133]: I1121 14:14:05.217362 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" event={"ID":"50083ce9-d8d7-40f9-aceb-4fd801670062","Type":"ContainerStarted","Data":"e030f120282e3a0e17f31c04c45071228e99fb9d82e87b46482e3a075369cba2"} Nov 21 14:14:06 crc kubenswrapper[5133]: I1121 14:14:06.234182 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" event={"ID":"50083ce9-d8d7-40f9-aceb-4fd801670062","Type":"ContainerStarted","Data":"c5eb8cff7df0169eda8c94fb4dd988a2c53d2c5325725035badd2cf7b1d644c5"} Nov 21 14:14:06 crc kubenswrapper[5133]: I1121 14:14:06.250933 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" podStartSLOduration=1.604669146 podStartE2EDuration="2.25091188s" podCreationTimestamp="2025-11-21 14:14:04 +0000 UTC" firstStartedPulling="2025-11-21 14:14:05.142904344 +0000 UTC m=+1904.940736592" lastFinishedPulling="2025-11-21 14:14:05.789147068 +0000 UTC m=+1905.586979326" observedRunningTime="2025-11-21 14:14:06.250091948 +0000 UTC m=+1906.047924216" watchObservedRunningTime="2025-11-21 14:14:06.25091188 +0000 UTC m=+1906.048744148" Nov 21 14:14:11 crc kubenswrapper[5133]: I1121 14:14:11.286201 5133 generic.go:334] "Generic (PLEG): container finished" podID="50083ce9-d8d7-40f9-aceb-4fd801670062" containerID="c5eb8cff7df0169eda8c94fb4dd988a2c53d2c5325725035badd2cf7b1d644c5" exitCode=0 Nov 21 14:14:11 crc kubenswrapper[5133]: I1121 14:14:11.286317 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" event={"ID":"50083ce9-d8d7-40f9-aceb-4fd801670062","Type":"ContainerDied","Data":"c5eb8cff7df0169eda8c94fb4dd988a2c53d2c5325725035badd2cf7b1d644c5"} Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.738225 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.833220 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-inventory\") pod \"50083ce9-d8d7-40f9-aceb-4fd801670062\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.833280 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tjqnv\" (UniqueName: \"kubernetes.io/projected/50083ce9-d8d7-40f9-aceb-4fd801670062-kube-api-access-tjqnv\") pod \"50083ce9-d8d7-40f9-aceb-4fd801670062\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.833302 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-ssh-key\") pod \"50083ce9-d8d7-40f9-aceb-4fd801670062\" (UID: \"50083ce9-d8d7-40f9-aceb-4fd801670062\") " Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.840106 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50083ce9-d8d7-40f9-aceb-4fd801670062-kube-api-access-tjqnv" (OuterVolumeSpecName: "kube-api-access-tjqnv") pod "50083ce9-d8d7-40f9-aceb-4fd801670062" (UID: "50083ce9-d8d7-40f9-aceb-4fd801670062"). InnerVolumeSpecName "kube-api-access-tjqnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.866214 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "50083ce9-d8d7-40f9-aceb-4fd801670062" (UID: "50083ce9-d8d7-40f9-aceb-4fd801670062"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.868311 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-inventory" (OuterVolumeSpecName: "inventory") pod "50083ce9-d8d7-40f9-aceb-4fd801670062" (UID: "50083ce9-d8d7-40f9-aceb-4fd801670062"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.935473 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.935527 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/50083ce9-d8d7-40f9-aceb-4fd801670062-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:14:12 crc kubenswrapper[5133]: I1121 14:14:12.935548 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tjqnv\" (UniqueName: \"kubernetes.io/projected/50083ce9-d8d7-40f9-aceb-4fd801670062-kube-api-access-tjqnv\") on node \"crc\" DevicePath \"\"" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.304862 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" event={"ID":"50083ce9-d8d7-40f9-aceb-4fd801670062","Type":"ContainerDied","Data":"e030f120282e3a0e17f31c04c45071228e99fb9d82e87b46482e3a075369cba2"} Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.304906 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e030f120282e3a0e17f31c04c45071228e99fb9d82e87b46482e3a075369cba2" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.304970 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.405235 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78"] Nov 21 14:14:13 crc kubenswrapper[5133]: E1121 14:14:13.405741 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50083ce9-d8d7-40f9-aceb-4fd801670062" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.405768 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="50083ce9-d8d7-40f9-aceb-4fd801670062" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.406136 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="50083ce9-d8d7-40f9-aceb-4fd801670062" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.407022 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.411740 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.412234 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.412533 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.412870 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.413607 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78"] Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.546601 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-btw78\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.546649 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cskt\" (UniqueName: \"kubernetes.io/projected/a00d8823-6a3e-4348-aefc-e0101509ad83-kube-api-access-8cskt\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-btw78\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.546681 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-btw78\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.648138 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-btw78\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.648212 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cskt\" (UniqueName: \"kubernetes.io/projected/a00d8823-6a3e-4348-aefc-e0101509ad83-kube-api-access-8cskt\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-btw78\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.648256 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-btw78\" (UID: 
\"a00d8823-6a3e-4348-aefc-e0101509ad83\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.652116 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-btw78\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.652116 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-btw78\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.665367 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cskt\" (UniqueName: \"kubernetes.io/projected/a00d8823-6a3e-4348-aefc-e0101509ad83-kube-api-access-8cskt\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-btw78\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:13 crc kubenswrapper[5133]: I1121 14:14:13.745953 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:14:14 crc kubenswrapper[5133]: I1121 14:14:14.259374 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78"] Nov 21 14:14:14 crc kubenswrapper[5133]: W1121 14:14:14.271717 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda00d8823_6a3e_4348_aefc_e0101509ad83.slice/crio-ce8359e0925be8b83e0c49d1f8ea64d9941b8f1c6bbdcd5999fbf682a247fb36 WatchSource:0}: Error finding container ce8359e0925be8b83e0c49d1f8ea64d9941b8f1c6bbdcd5999fbf682a247fb36: Status 404 returned error can't find the container with id ce8359e0925be8b83e0c49d1f8ea64d9941b8f1c6bbdcd5999fbf682a247fb36 Nov 21 14:14:14 crc kubenswrapper[5133]: I1121 14:14:14.321021 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" event={"ID":"a00d8823-6a3e-4348-aefc-e0101509ad83","Type":"ContainerStarted","Data":"ce8359e0925be8b83e0c49d1f8ea64d9941b8f1c6bbdcd5999fbf682a247fb36"} Nov 21 14:14:15 crc kubenswrapper[5133]: I1121 14:14:15.457926 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:14:15 crc kubenswrapper[5133]: E1121 14:14:15.458397 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:14:17 crc kubenswrapper[5133]: I1121 14:14:17.351298 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" 
event={"ID":"a00d8823-6a3e-4348-aefc-e0101509ad83","Type":"ContainerStarted","Data":"32f7eb89303faa09234527ee8fd83a587998293788060d8ec123d23e54a1c964"} Nov 21 14:14:17 crc kubenswrapper[5133]: I1121 14:14:17.386284 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" podStartSLOduration=2.51229153 podStartE2EDuration="4.386257769s" podCreationTimestamp="2025-11-21 14:14:13 +0000 UTC" firstStartedPulling="2025-11-21 14:14:14.274154124 +0000 UTC m=+1914.071986382" lastFinishedPulling="2025-11-21 14:14:16.148120333 +0000 UTC m=+1915.945952621" observedRunningTime="2025-11-21 14:14:17.371679767 +0000 UTC m=+1917.169512055" watchObservedRunningTime="2025-11-21 14:14:17.386257769 +0000 UTC m=+1917.184090017" Nov 21 14:14:18 crc kubenswrapper[5133]: I1121 14:14:18.062630 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-dcmgn"] Nov 21 14:14:18 crc kubenswrapper[5133]: I1121 14:14:18.074391 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-tvs68"] Nov 21 14:14:18 crc kubenswrapper[5133]: I1121 14:14:18.103101 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-6blxq"] Nov 21 14:14:18 crc kubenswrapper[5133]: I1121 14:14:18.114030 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-tvs68"] Nov 21 14:14:18 crc kubenswrapper[5133]: I1121 14:14:18.124922 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-dcmgn"] Nov 21 14:14:18 crc kubenswrapper[5133]: I1121 14:14:18.132772 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-6blxq"] Nov 21 14:14:18 crc kubenswrapper[5133]: I1121 14:14:18.470248 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5782c62d-fdcd-43f5-9af1-c84968e501ed" path="/var/lib/kubelet/pods/5782c62d-fdcd-43f5-9af1-c84968e501ed/volumes" Nov 21 14:14:18 crc kubenswrapper[5133]: I1121 14:14:18.471068 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76b580d8-fd56-40c0-a24a-1a3234d95ca6" path="/var/lib/kubelet/pods/76b580d8-fd56-40c0-a24a-1a3234d95ca6/volumes" Nov 21 14:14:18 crc kubenswrapper[5133]: I1121 14:14:18.472026 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b1d299f-58dc-43de-a331-e7a15063fcd0" path="/var/lib/kubelet/pods/7b1d299f-58dc-43de-a331-e7a15063fcd0/volumes" Nov 21 14:14:26 crc kubenswrapper[5133]: I1121 14:14:26.054533 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-26kdp"] Nov 21 14:14:26 crc kubenswrapper[5133]: I1121 14:14:26.073380 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-26kdp"] Nov 21 14:14:26 crc kubenswrapper[5133]: I1121 14:14:26.479137 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="944fb1ae-3bf4-479a-863d-62867fdf5b82" path="/var/lib/kubelet/pods/944fb1ae-3bf4-479a-863d-62867fdf5b82/volumes" Nov 21 14:14:30 crc kubenswrapper[5133]: I1121 14:14:30.459145 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:14:30 crc kubenswrapper[5133]: E1121 14:14:30.459946 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:14:35 crc kubenswrapper[5133]: I1121 14:14:35.892814 5133 scope.go:117] "RemoveContainer" containerID="1d8016066359fdca507d97f1f9ba81316b356e4b80486a94be57ffe845b858cb" Nov 21 14:14:35 crc kubenswrapper[5133]: I1121 14:14:35.964045 5133 scope.go:117] "RemoveContainer" containerID="1b012093ae77969b0a3c7baae1bd9fe3b1c9a037d5139292c62a7d2f76c2c18e" Nov 21 14:14:36 crc kubenswrapper[5133]: I1121 14:14:36.047807 5133 scope.go:117] "RemoveContainer" containerID="6689a9fbf1b6604c2250085a46f175acbea7ce5690f920e0efeed92fe57ae38b" Nov 21 14:14:36 crc kubenswrapper[5133]: I1121 14:14:36.068866 5133 scope.go:117] "RemoveContainer" containerID="da90ab00cb608f0d016ddf04a222142730d338b65882482d85d8fe3dadbbdff9" Nov 21 14:14:36 crc kubenswrapper[5133]: I1121 14:14:36.122818 5133 scope.go:117] "RemoveContainer" containerID="af35a52559600ac679d265671a6b2484259a5617266456e79ef435f6c440aef9" Nov 21 14:14:36 crc kubenswrapper[5133]: I1121 14:14:36.169694 5133 scope.go:117] "RemoveContainer" containerID="8f8f8d2a0f69e6b1740fc631a7e66fbc9797fca02d3a2526f4924a14ecba1735" Nov 21 14:14:36 crc kubenswrapper[5133]: I1121 14:14:36.199659 5133 scope.go:117] "RemoveContainer" containerID="dfaba6e19a83f28c8fa298433848a54c502472ca950ad72a5c4aef29e00da940" Nov 21 14:14:36 crc kubenswrapper[5133]: I1121 14:14:36.235543 5133 scope.go:117] "RemoveContainer" containerID="2f1f90d7f092574e582f9159c77dedddc3e531fe122a1cfc85226f9feaae00c2" Nov 21 14:14:36 crc kubenswrapper[5133]: I1121 14:14:36.258823 5133 scope.go:117] "RemoveContainer" containerID="4245b8bbae7058ae2b18cfc715da4741937ddd97a0fe3033bccdcf669791b2b9" Nov 21 14:14:37 crc kubenswrapper[5133]: I1121 14:14:37.038336 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-m47ck"] Nov 21 14:14:37 crc kubenswrapper[5133]: I1121 14:14:37.047303 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-m47ck"] Nov 21 14:14:38 crc kubenswrapper[5133]: I1121 14:14:38.035793 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-8cz2f"] Nov 21 14:14:38 crc kubenswrapper[5133]: I1121 14:14:38.042418 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-8cz2f"] Nov 21 14:14:38 crc kubenswrapper[5133]: I1121 14:14:38.479679 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b61488dd-2db1-43b5-996b-43b76a5dbda6" path="/var/lib/kubelet/pods/b61488dd-2db1-43b5-996b-43b76a5dbda6/volumes" Nov 21 14:14:38 crc kubenswrapper[5133]: I1121 14:14:38.481555 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5102115-b63e-42e2-8aae-1a68e7dda37c" path="/var/lib/kubelet/pods/e5102115-b63e-42e2-8aae-1a68e7dda37c/volumes" Nov 21 14:14:41 crc kubenswrapper[5133]: I1121 14:14:41.457734 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:14:41 crc kubenswrapper[5133]: E1121 14:14:41.458573 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:14:54 crc kubenswrapper[5133]: I1121 14:14:54.458223 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:14:54 crc kubenswrapper[5133]: I1121 14:14:54.715374 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"2906b97bc9f85b4f70f32fefe8fea2cab9f1e58520d8b56a7cb015384ab82031"} Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.161136 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j"] Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.163503 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.165865 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.166245 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.176852 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j"] Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.345914 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-config-volume\") pod \"collect-profiles-29395575-xg87j\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.346169 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdzz4\" (UniqueName: \"kubernetes.io/projected/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-kube-api-access-cdzz4\") pod \"collect-profiles-29395575-xg87j\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.346326 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-secret-volume\") pod \"collect-profiles-29395575-xg87j\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.447916 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-secret-volume\") pod \"collect-profiles-29395575-xg87j\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.448061 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-config-volume\") pod \"collect-profiles-29395575-xg87j\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.448137 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdzz4\" (UniqueName: \"kubernetes.io/projected/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-kube-api-access-cdzz4\") pod \"collect-profiles-29395575-xg87j\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.448903 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-config-volume\") pod \"collect-profiles-29395575-xg87j\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.455511 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-secret-volume\") pod \"collect-profiles-29395575-xg87j\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.475544 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdzz4\" (UniqueName: \"kubernetes.io/projected/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-kube-api-access-cdzz4\") pod \"collect-profiles-29395575-xg87j\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.522247 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.778986 5133 generic.go:334] "Generic (PLEG): container finished" podID="a00d8823-6a3e-4348-aefc-e0101509ad83" containerID="32f7eb89303faa09234527ee8fd83a587998293788060d8ec123d23e54a1c964" exitCode=0 Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.779232 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" event={"ID":"a00d8823-6a3e-4348-aefc-e0101509ad83","Type":"ContainerDied","Data":"32f7eb89303faa09234527ee8fd83a587998293788060d8ec123d23e54a1c964"} Nov 21 14:15:00 crc kubenswrapper[5133]: I1121 14:15:00.990617 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j"] Nov 21 14:15:01 crc kubenswrapper[5133]: I1121 14:15:01.790891 5133 generic.go:334] "Generic (PLEG): container finished" podID="129a90f1-ca6c-4eb2-8130-ede8a21ac65a" containerID="3ca5536b8a3a21486a7e504b3c93b75424ef43f7812000f2b73cca6561cde7c6" exitCode=0 Nov 21 14:15:01 crc kubenswrapper[5133]: I1121 14:15:01.791014 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" event={"ID":"129a90f1-ca6c-4eb2-8130-ede8a21ac65a","Type":"ContainerDied","Data":"3ca5536b8a3a21486a7e504b3c93b75424ef43f7812000f2b73cca6561cde7c6"} Nov 21 14:15:01 crc kubenswrapper[5133]: I1121 14:15:01.791068 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" event={"ID":"129a90f1-ca6c-4eb2-8130-ede8a21ac65a","Type":"ContainerStarted","Data":"8199714f86778157e055736cf8f4bc2ec15accd9f7aca47d2a3a18471499cb10"} Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.360768 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.388715 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cskt\" (UniqueName: \"kubernetes.io/projected/a00d8823-6a3e-4348-aefc-e0101509ad83-kube-api-access-8cskt\") pod \"a00d8823-6a3e-4348-aefc-e0101509ad83\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.388782 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-ssh-key\") pod \"a00d8823-6a3e-4348-aefc-e0101509ad83\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.388858 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-inventory\") pod \"a00d8823-6a3e-4348-aefc-e0101509ad83\" (UID: \"a00d8823-6a3e-4348-aefc-e0101509ad83\") " Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.403645 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a00d8823-6a3e-4348-aefc-e0101509ad83-kube-api-access-8cskt" (OuterVolumeSpecName: "kube-api-access-8cskt") pod "a00d8823-6a3e-4348-aefc-e0101509ad83" (UID: "a00d8823-6a3e-4348-aefc-e0101509ad83"). InnerVolumeSpecName "kube-api-access-8cskt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.420198 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a00d8823-6a3e-4348-aefc-e0101509ad83" (UID: "a00d8823-6a3e-4348-aefc-e0101509ad83"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.427064 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-inventory" (OuterVolumeSpecName: "inventory") pod "a00d8823-6a3e-4348-aefc-e0101509ad83" (UID: "a00d8823-6a3e-4348-aefc-e0101509ad83"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.492029 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cskt\" (UniqueName: \"kubernetes.io/projected/a00d8823-6a3e-4348-aefc-e0101509ad83-kube-api-access-8cskt\") on node \"crc\" DevicePath \"\"" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.492058 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.492073 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a00d8823-6a3e-4348-aefc-e0101509ad83-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.799546 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" event={"ID":"a00d8823-6a3e-4348-aefc-e0101509ad83","Type":"ContainerDied","Data":"ce8359e0925be8b83e0c49d1f8ea64d9941b8f1c6bbdcd5999fbf682a247fb36"} Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.799599 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce8359e0925be8b83e0c49d1f8ea64d9941b8f1c6bbdcd5999fbf682a247fb36" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.799577 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.901789 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp"] Nov 21 14:15:02 crc kubenswrapper[5133]: E1121 14:15:02.902240 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a00d8823-6a3e-4348-aefc-e0101509ad83" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.902262 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a00d8823-6a3e-4348-aefc-e0101509ad83" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.902412 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a00d8823-6a3e-4348-aefc-e0101509ad83" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.903091 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.905309 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.905622 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.905747 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.905985 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:15:02 crc kubenswrapper[5133]: I1121 14:15:02.909810 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp"] Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.101337 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.101489 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl9xl\" (UniqueName: \"kubernetes.io/projected/39394758-b0aa-434c-b296-7c23eac0abee-kube-api-access-wl9xl\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.101527 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.129261 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.202990 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl9xl\" (UniqueName: \"kubernetes.io/projected/39394758-b0aa-434c-b296-7c23eac0abee-kube-api-access-wl9xl\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.203075 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.203237 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.208680 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.216474 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.219818 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wl9xl\" (UniqueName: \"kubernetes.io/projected/39394758-b0aa-434c-b296-7c23eac0abee-kube-api-access-wl9xl\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.238128 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.304345 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdzz4\" (UniqueName: \"kubernetes.io/projected/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-kube-api-access-cdzz4\") pod \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.304459 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-secret-volume\") pod \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.304636 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-config-volume\") pod \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\" (UID: \"129a90f1-ca6c-4eb2-8130-ede8a21ac65a\") " Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.305627 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-config-volume" (OuterVolumeSpecName: "config-volume") pod "129a90f1-ca6c-4eb2-8130-ede8a21ac65a" (UID: "129a90f1-ca6c-4eb2-8130-ede8a21ac65a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.309901 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "129a90f1-ca6c-4eb2-8130-ede8a21ac65a" (UID: "129a90f1-ca6c-4eb2-8130-ede8a21ac65a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.312257 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-kube-api-access-cdzz4" (OuterVolumeSpecName: "kube-api-access-cdzz4") pod "129a90f1-ca6c-4eb2-8130-ede8a21ac65a" (UID: "129a90f1-ca6c-4eb2-8130-ede8a21ac65a"). InnerVolumeSpecName "kube-api-access-cdzz4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.406606 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.406635 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdzz4\" (UniqueName: \"kubernetes.io/projected/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-kube-api-access-cdzz4\") on node \"crc\" DevicePath \"\"" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.406646 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/129a90f1-ca6c-4eb2-8130-ede8a21ac65a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.760897 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp"] Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.809030 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" event={"ID":"39394758-b0aa-434c-b296-7c23eac0abee","Type":"ContainerStarted","Data":"631addb03abc2a70748865d7f71293b0d37378655ec211c9541c2b39e044cda3"} Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.811131 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" event={"ID":"129a90f1-ca6c-4eb2-8130-ede8a21ac65a","Type":"ContainerDied","Data":"8199714f86778157e055736cf8f4bc2ec15accd9f7aca47d2a3a18471499cb10"} Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.811163 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8199714f86778157e055736cf8f4bc2ec15accd9f7aca47d2a3a18471499cb10" Nov 21 14:15:03 crc kubenswrapper[5133]: I1121 14:15:03.811198 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j" Nov 21 14:15:04 crc kubenswrapper[5133]: I1121 14:15:04.820286 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" event={"ID":"39394758-b0aa-434c-b296-7c23eac0abee","Type":"ContainerStarted","Data":"4e067bb5548a20b221329767cdc1354ceeb3307ef6d4f13dc58ad5fc7a7aa4c3"} Nov 21 14:15:04 crc kubenswrapper[5133]: I1121 14:15:04.841131 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" podStartSLOduration=2.446301924 podStartE2EDuration="2.841115554s" podCreationTimestamp="2025-11-21 14:15:02 +0000 UTC" firstStartedPulling="2025-11-21 14:15:03.769846477 +0000 UTC m=+1963.567678725" lastFinishedPulling="2025-11-21 14:15:04.164660107 +0000 UTC m=+1963.962492355" observedRunningTime="2025-11-21 14:15:04.837108456 +0000 UTC m=+1964.634940714" watchObservedRunningTime="2025-11-21 14:15:04.841115554 +0000 UTC m=+1964.638947802" Nov 21 14:15:08 crc kubenswrapper[5133]: I1121 14:15:08.850260 5133 generic.go:334] "Generic (PLEG): container finished" podID="39394758-b0aa-434c-b296-7c23eac0abee" containerID="4e067bb5548a20b221329767cdc1354ceeb3307ef6d4f13dc58ad5fc7a7aa4c3" exitCode=0 Nov 21 14:15:08 crc kubenswrapper[5133]: I1121 14:15:08.850300 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" event={"ID":"39394758-b0aa-434c-b296-7c23eac0abee","Type":"ContainerDied","Data":"4e067bb5548a20b221329767cdc1354ceeb3307ef6d4f13dc58ad5fc7a7aa4c3"} Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.323693 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.425232 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wl9xl\" (UniqueName: \"kubernetes.io/projected/39394758-b0aa-434c-b296-7c23eac0abee-kube-api-access-wl9xl\") pod \"39394758-b0aa-434c-b296-7c23eac0abee\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.425346 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-ssh-key\") pod \"39394758-b0aa-434c-b296-7c23eac0abee\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.425862 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-inventory\") pod \"39394758-b0aa-434c-b296-7c23eac0abee\" (UID: \"39394758-b0aa-434c-b296-7c23eac0abee\") " Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.432035 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39394758-b0aa-434c-b296-7c23eac0abee-kube-api-access-wl9xl" (OuterVolumeSpecName: "kube-api-access-wl9xl") pod "39394758-b0aa-434c-b296-7c23eac0abee" (UID: "39394758-b0aa-434c-b296-7c23eac0abee"). InnerVolumeSpecName "kube-api-access-wl9xl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.450035 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-inventory" (OuterVolumeSpecName: "inventory") pod "39394758-b0aa-434c-b296-7c23eac0abee" (UID: "39394758-b0aa-434c-b296-7c23eac0abee"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.457032 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "39394758-b0aa-434c-b296-7c23eac0abee" (UID: "39394758-b0aa-434c-b296-7c23eac0abee"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.528322 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wl9xl\" (UniqueName: \"kubernetes.io/projected/39394758-b0aa-434c-b296-7c23eac0abee-kube-api-access-wl9xl\") on node \"crc\" DevicePath \"\"" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.528360 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.528370 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/39394758-b0aa-434c-b296-7c23eac0abee-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.872623 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" event={"ID":"39394758-b0aa-434c-b296-7c23eac0abee","Type":"ContainerDied","Data":"631addb03abc2a70748865d7f71293b0d37378655ec211c9541c2b39e044cda3"} Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.872670 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.872692 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="631addb03abc2a70748865d7f71293b0d37378655ec211c9541c2b39e044cda3" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.984801 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn"] Nov 21 14:15:10 crc kubenswrapper[5133]: E1121 14:15:10.985650 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39394758-b0aa-434c-b296-7c23eac0abee" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.985689 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="39394758-b0aa-434c-b296-7c23eac0abee" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 21 14:15:10 crc kubenswrapper[5133]: E1121 14:15:10.985725 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="129a90f1-ca6c-4eb2-8130-ede8a21ac65a" containerName="collect-profiles" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.985731 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="129a90f1-ca6c-4eb2-8130-ede8a21ac65a" containerName="collect-profiles" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.988033 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="129a90f1-ca6c-4eb2-8130-ede8a21ac65a" containerName="collect-profiles" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.988089 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="39394758-b0aa-434c-b296-7c23eac0abee" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.989332 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.991835 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.992100 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.992639 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:15:10 crc kubenswrapper[5133]: I1121 14:15:10.994914 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.013853 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn"] Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.139070 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-d8prn\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.139133 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-d8prn\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.139190 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29lr2\" (UniqueName: \"kubernetes.io/projected/7d1a1428-a43a-494b-9478-92dba073c524-kube-api-access-29lr2\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-d8prn\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.240814 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-d8prn\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.240902 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29lr2\" (UniqueName: \"kubernetes.io/projected/7d1a1428-a43a-494b-9478-92dba073c524-kube-api-access-29lr2\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-d8prn\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.241125 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-d8prn\" 
(UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.244884 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-d8prn\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.248380 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-d8prn\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.262037 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29lr2\" (UniqueName: \"kubernetes.io/projected/7d1a1428-a43a-494b-9478-92dba073c524-kube-api-access-29lr2\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-d8prn\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.316191 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:15:11 crc kubenswrapper[5133]: I1121 14:15:11.946463 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn"] Nov 21 14:15:11 crc kubenswrapper[5133]: W1121 14:15:11.956259 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d1a1428_a43a_494b_9478_92dba073c524.slice/crio-343fbe4f8597c7a73a06da7170871b5fa68aed733d1f7706943dd33951b0681c WatchSource:0}: Error finding container 343fbe4f8597c7a73a06da7170871b5fa68aed733d1f7706943dd33951b0681c: Status 404 returned error can't find the container with id 343fbe4f8597c7a73a06da7170871b5fa68aed733d1f7706943dd33951b0681c Nov 21 14:15:12 crc kubenswrapper[5133]: I1121 14:15:12.893096 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" event={"ID":"7d1a1428-a43a-494b-9478-92dba073c524","Type":"ContainerStarted","Data":"6b3405b74425624cff63d2e4b9590b860c945b68b04c30d869af9b4c1098c58a"} Nov 21 14:15:12 crc kubenswrapper[5133]: I1121 14:15:12.893622 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" event={"ID":"7d1a1428-a43a-494b-9478-92dba073c524","Type":"ContainerStarted","Data":"343fbe4f8597c7a73a06da7170871b5fa68aed733d1f7706943dd33951b0681c"} Nov 21 14:15:12 crc kubenswrapper[5133]: I1121 14:15:12.915756 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" podStartSLOduration=2.4467681629999998 podStartE2EDuration="2.915734398s" podCreationTimestamp="2025-11-21 14:15:10 +0000 UTC" firstStartedPulling="2025-11-21 14:15:11.959581449 +0000 UTC m=+1971.757413697" lastFinishedPulling="2025-11-21 14:15:12.428547684 +0000 UTC m=+1972.226379932" observedRunningTime="2025-11-21 
14:15:12.906945192 +0000 UTC m=+1972.704777480" watchObservedRunningTime="2025-11-21 14:15:12.915734398 +0000 UTC m=+1972.713566656" Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.064560 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-p6q9z"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.075559 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-ad6a-account-create-5whkz"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.087158 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-128b-account-create-rbwdb"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.093904 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-d999-account-create-5tk8j"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.101043 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-b887d"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.107593 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-p6q9z"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.113539 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-wdldd"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.119021 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-b887d"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.124448 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-128b-account-create-rbwdb"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.129913 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-d999-account-create-5tk8j"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.135401 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-ad6a-account-create-5whkz"] Nov 21 14:15:27 crc kubenswrapper[5133]: I1121 14:15:27.142102 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-wdldd"] Nov 21 14:15:28 crc kubenswrapper[5133]: I1121 14:15:28.476302 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07bf540e-b52e-48fa-8c2d-2b95483089c4" path="/var/lib/kubelet/pods/07bf540e-b52e-48fa-8c2d-2b95483089c4/volumes" Nov 21 14:15:28 crc kubenswrapper[5133]: I1121 14:15:28.477597 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a65a32c-3d97-4037-805b-07a5236fbaae" path="/var/lib/kubelet/pods/1a65a32c-3d97-4037-805b-07a5236fbaae/volumes" Nov 21 14:15:28 crc kubenswrapper[5133]: I1121 14:15:28.478800 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f29a717-fad3-49bb-96bb-6d753e70d414" path="/var/lib/kubelet/pods/7f29a717-fad3-49bb-96bb-6d753e70d414/volumes" Nov 21 14:15:28 crc kubenswrapper[5133]: I1121 14:15:28.480184 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc741b72-a799-418c-8071-12d8027de48a" path="/var/lib/kubelet/pods/bc741b72-a799-418c-8071-12d8027de48a/volumes" Nov 21 14:15:28 crc kubenswrapper[5133]: I1121 14:15:28.482133 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01" path="/var/lib/kubelet/pods/c609a47f-a27a-4a9e-b3bc-6e8f7dfafc01/volumes" Nov 21 14:15:28 crc kubenswrapper[5133]: I1121 14:15:28.482693 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9efa168-aaaa-43e7-8cce-6f6797585494" 
path="/var/lib/kubelet/pods/c9efa168-aaaa-43e7-8cce-6f6797585494/volumes" Nov 21 14:15:36 crc kubenswrapper[5133]: I1121 14:15:36.443613 5133 scope.go:117] "RemoveContainer" containerID="8e1c368d98e96f298e4e9df9f519f49757c7ef2a1a422bfcfaf9033f3a4bc012" Nov 21 14:15:36 crc kubenswrapper[5133]: I1121 14:15:36.482173 5133 scope.go:117] "RemoveContainer" containerID="4b91809b0e979932e9f078f62f8ba8bca15983a68e9e784a3727a3081ba8e8ca" Nov 21 14:15:36 crc kubenswrapper[5133]: I1121 14:15:36.516585 5133 scope.go:117] "RemoveContainer" containerID="6ea3607fd21d89052b574ed3d55d0e742377c327e98bbdd13b81ee269d042098" Nov 21 14:15:36 crc kubenswrapper[5133]: I1121 14:15:36.557627 5133 scope.go:117] "RemoveContainer" containerID="85feb45cdd702ace893e626bea50249d4b3be788fd6fc9c099415fcba60b41f5" Nov 21 14:15:36 crc kubenswrapper[5133]: I1121 14:15:36.597709 5133 scope.go:117] "RemoveContainer" containerID="66a435a0ab3d27cf95142f55dde169936e4fc6034470dd3e5a5c8401d6cd6a5c" Nov 21 14:15:36 crc kubenswrapper[5133]: I1121 14:15:36.631807 5133 scope.go:117] "RemoveContainer" containerID="3494c50fa90e29715228d64226f1a7d95710c4cb930d522c966e254df0e38021" Nov 21 14:15:36 crc kubenswrapper[5133]: I1121 14:15:36.694954 5133 scope.go:117] "RemoveContainer" containerID="d252e144cec13d98268fe83087b99764332e2dfbdac245e47d37e15c587dc6de" Nov 21 14:15:36 crc kubenswrapper[5133]: I1121 14:15:36.720788 5133 scope.go:117] "RemoveContainer" containerID="4b59e8149c82d8cc93880a618af3bf50d2a740da199903690edf595cf288f49b" Nov 21 14:15:53 crc kubenswrapper[5133]: I1121 14:15:53.060623 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-97b9q"] Nov 21 14:15:53 crc kubenswrapper[5133]: I1121 14:15:53.070850 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-97b9q"] Nov 21 14:15:54 crc kubenswrapper[5133]: I1121 14:15:54.478506 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d6200bb-1d14-4055-b2e3-448d9e43254b" path="/var/lib/kubelet/pods/1d6200bb-1d14-4055-b2e3-448d9e43254b/volumes" Nov 21 14:16:10 crc kubenswrapper[5133]: I1121 14:16:10.475832 5133 generic.go:334] "Generic (PLEG): container finished" podID="7d1a1428-a43a-494b-9478-92dba073c524" containerID="6b3405b74425624cff63d2e4b9590b860c945b68b04c30d869af9b4c1098c58a" exitCode=0 Nov 21 14:16:10 crc kubenswrapper[5133]: I1121 14:16:10.475954 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" event={"ID":"7d1a1428-a43a-494b-9478-92dba073c524","Type":"ContainerDied","Data":"6b3405b74425624cff63d2e4b9590b860c945b68b04c30d869af9b4c1098c58a"} Nov 21 14:16:11 crc kubenswrapper[5133]: I1121 14:16:11.920712 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.078953 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-inventory\") pod \"7d1a1428-a43a-494b-9478-92dba073c524\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.081214 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29lr2\" (UniqueName: \"kubernetes.io/projected/7d1a1428-a43a-494b-9478-92dba073c524-kube-api-access-29lr2\") pod \"7d1a1428-a43a-494b-9478-92dba073c524\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.083537 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-ssh-key\") pod \"7d1a1428-a43a-494b-9478-92dba073c524\" (UID: \"7d1a1428-a43a-494b-9478-92dba073c524\") " Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.098554 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d1a1428-a43a-494b-9478-92dba073c524-kube-api-access-29lr2" (OuterVolumeSpecName: "kube-api-access-29lr2") pod "7d1a1428-a43a-494b-9478-92dba073c524" (UID: "7d1a1428-a43a-494b-9478-92dba073c524"). InnerVolumeSpecName "kube-api-access-29lr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.120342 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7d1a1428-a43a-494b-9478-92dba073c524" (UID: "7d1a1428-a43a-494b-9478-92dba073c524"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.122343 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-inventory" (OuterVolumeSpecName: "inventory") pod "7d1a1428-a43a-494b-9478-92dba073c524" (UID: "7d1a1428-a43a-494b-9478-92dba073c524"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.185576 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.185614 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7d1a1428-a43a-494b-9478-92dba073c524-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.185625 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29lr2\" (UniqueName: \"kubernetes.io/projected/7d1a1428-a43a-494b-9478-92dba073c524-kube-api-access-29lr2\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.498973 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" event={"ID":"7d1a1428-a43a-494b-9478-92dba073c524","Type":"ContainerDied","Data":"343fbe4f8597c7a73a06da7170871b5fa68aed733d1f7706943dd33951b0681c"} Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.499030 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="343fbe4f8597c7a73a06da7170871b5fa68aed733d1f7706943dd33951b0681c" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.499060 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.620174 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-m8sxg"] Nov 21 14:16:12 crc kubenswrapper[5133]: E1121 14:16:12.620868 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d1a1428-a43a-494b-9478-92dba073c524" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.620891 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d1a1428-a43a-494b-9478-92dba073c524" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.622087 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d1a1428-a43a-494b-9478-92dba073c524" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.623174 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.625662 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.627088 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.627215 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.627280 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.661266 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-m8sxg"] Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.795335 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fs7c6\" (UniqueName: \"kubernetes.io/projected/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-kube-api-access-fs7c6\") pod \"ssh-known-hosts-edpm-deployment-m8sxg\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.795431 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-m8sxg\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.795817 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-m8sxg\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.898145 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fs7c6\" (UniqueName: \"kubernetes.io/projected/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-kube-api-access-fs7c6\") pod \"ssh-known-hosts-edpm-deployment-m8sxg\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.898255 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-m8sxg\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.898366 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-m8sxg\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc 
kubenswrapper[5133]: I1121 14:16:12.906249 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-m8sxg\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.906818 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-m8sxg\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.919424 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fs7c6\" (UniqueName: \"kubernetes.io/projected/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-kube-api-access-fs7c6\") pod \"ssh-known-hosts-edpm-deployment-m8sxg\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:12 crc kubenswrapper[5133]: I1121 14:16:12.954399 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:13 crc kubenswrapper[5133]: I1121 14:16:13.556100 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-m8sxg"] Nov 21 14:16:14 crc kubenswrapper[5133]: I1121 14:16:14.522282 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" event={"ID":"e8455706-f0ef-47cc-8b9f-48390f4b4fa4","Type":"ContainerStarted","Data":"3eb41b1518dedfbb91f03a54a2e535c4e09ce3ecb0746940b9bbfa7e1811073e"} Nov 21 14:16:14 crc kubenswrapper[5133]: I1121 14:16:14.522745 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" event={"ID":"e8455706-f0ef-47cc-8b9f-48390f4b4fa4","Type":"ContainerStarted","Data":"ba2243b6f12220309cc86c42f00c30c62001026b09d3dcda3853d80c03f7c58b"} Nov 21 14:16:14 crc kubenswrapper[5133]: I1121 14:16:14.542459 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" podStartSLOduration=1.87769239 podStartE2EDuration="2.542436862s" podCreationTimestamp="2025-11-21 14:16:12 +0000 UTC" firstStartedPulling="2025-11-21 14:16:13.574154505 +0000 UTC m=+2033.371986753" lastFinishedPulling="2025-11-21 14:16:14.238898967 +0000 UTC m=+2034.036731225" observedRunningTime="2025-11-21 14:16:14.541906228 +0000 UTC m=+2034.339738476" watchObservedRunningTime="2025-11-21 14:16:14.542436862 +0000 UTC m=+2034.340269110" Nov 21 14:16:17 crc kubenswrapper[5133]: I1121 14:16:17.037798 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-pk7zb"] Nov 21 14:16:17 crc kubenswrapper[5133]: I1121 14:16:17.052139 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-pk7zb"] Nov 21 14:16:18 crc kubenswrapper[5133]: I1121 14:16:18.471565 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18aab30d-6eea-41e2-966c-371f6e0a2e76" path="/var/lib/kubelet/pods/18aab30d-6eea-41e2-966c-371f6e0a2e76/volumes" Nov 21 14:16:19 crc kubenswrapper[5133]: I1121 14:16:19.083532 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-conductor-db-sync-bf6r4"] Nov 21 14:16:19 crc kubenswrapper[5133]: I1121 14:16:19.093861 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-bf6r4"] Nov 21 14:16:20 crc kubenswrapper[5133]: I1121 14:16:20.472686 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32a69b88-3e19-4aca-ad8e-0a5b35c136cf" path="/var/lib/kubelet/pods/32a69b88-3e19-4aca-ad8e-0a5b35c136cf/volumes" Nov 21 14:16:21 crc kubenswrapper[5133]: I1121 14:16:21.595052 5133 generic.go:334] "Generic (PLEG): container finished" podID="e8455706-f0ef-47cc-8b9f-48390f4b4fa4" containerID="3eb41b1518dedfbb91f03a54a2e535c4e09ce3ecb0746940b9bbfa7e1811073e" exitCode=0 Nov 21 14:16:21 crc kubenswrapper[5133]: I1121 14:16:21.595118 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" event={"ID":"e8455706-f0ef-47cc-8b9f-48390f4b4fa4","Type":"ContainerDied","Data":"3eb41b1518dedfbb91f03a54a2e535c4e09ce3ecb0746940b9bbfa7e1811073e"} Nov 21 14:16:21 crc kubenswrapper[5133]: E1121 14:16:21.668587 5133 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8455706_f0ef_47cc_8b9f_48390f4b4fa4.slice/crio-3eb41b1518dedfbb91f03a54a2e535c4e09ce3ecb0746940b9bbfa7e1811073e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8455706_f0ef_47cc_8b9f_48390f4b4fa4.slice/crio-conmon-3eb41b1518dedfbb91f03a54a2e535c4e09ce3ecb0746940b9bbfa7e1811073e.scope\": RecentStats: unable to find data in memory cache]" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.051476 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.102507 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-inventory-0\") pod \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.102673 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-ssh-key-openstack-edpm-ipam\") pod \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.102735 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fs7c6\" (UniqueName: \"kubernetes.io/projected/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-kube-api-access-fs7c6\") pod \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\" (UID: \"e8455706-f0ef-47cc-8b9f-48390f4b4fa4\") " Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.109405 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-kube-api-access-fs7c6" (OuterVolumeSpecName: "kube-api-access-fs7c6") pod "e8455706-f0ef-47cc-8b9f-48390f4b4fa4" (UID: "e8455706-f0ef-47cc-8b9f-48390f4b4fa4"). InnerVolumeSpecName "kube-api-access-fs7c6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.138615 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "e8455706-f0ef-47cc-8b9f-48390f4b4fa4" (UID: "e8455706-f0ef-47cc-8b9f-48390f4b4fa4"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.150722 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e8455706-f0ef-47cc-8b9f-48390f4b4fa4" (UID: "e8455706-f0ef-47cc-8b9f-48390f4b4fa4"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.205500 5133 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.205555 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.205575 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fs7c6\" (UniqueName: \"kubernetes.io/projected/e8455706-f0ef-47cc-8b9f-48390f4b4fa4-kube-api-access-fs7c6\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.614627 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" event={"ID":"e8455706-f0ef-47cc-8b9f-48390f4b4fa4","Type":"ContainerDied","Data":"ba2243b6f12220309cc86c42f00c30c62001026b09d3dcda3853d80c03f7c58b"} Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.614676 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba2243b6f12220309cc86c42f00c30c62001026b09d3dcda3853d80c03f7c58b" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.614738 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-m8sxg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.699272 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg"] Nov 21 14:16:23 crc kubenswrapper[5133]: E1121 14:16:23.699786 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8455706-f0ef-47cc-8b9f-48390f4b4fa4" containerName="ssh-known-hosts-edpm-deployment" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.699813 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8455706-f0ef-47cc-8b9f-48390f4b4fa4" containerName="ssh-known-hosts-edpm-deployment" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.700208 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8455706-f0ef-47cc-8b9f-48390f4b4fa4" containerName="ssh-known-hosts-edpm-deployment" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.701178 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.704423 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.704705 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.704868 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.705137 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.712278 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg"] Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.817070 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6rmbg\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.817175 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz8dd\" (UniqueName: \"kubernetes.io/projected/0c99cd14-85e0-4b44-ace2-8a624ff98e60-kube-api-access-xz8dd\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6rmbg\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.817209 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6rmbg\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.919428 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6rmbg\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.919544 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz8dd\" (UniqueName: \"kubernetes.io/projected/0c99cd14-85e0-4b44-ace2-8a624ff98e60-kube-api-access-xz8dd\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6rmbg\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.919575 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6rmbg\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.923839 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6rmbg\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.925667 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6rmbg\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:23 crc kubenswrapper[5133]: I1121 14:16:23.951227 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz8dd\" (UniqueName: \"kubernetes.io/projected/0c99cd14-85e0-4b44-ace2-8a624ff98e60-kube-api-access-xz8dd\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6rmbg\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:24 crc kubenswrapper[5133]: I1121 14:16:24.032599 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:24 crc kubenswrapper[5133]: I1121 14:16:24.596273 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg"] Nov 21 14:16:24 crc kubenswrapper[5133]: W1121 14:16:24.608527 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c99cd14_85e0_4b44_ace2_8a624ff98e60.slice/crio-008b7d52475b33b1129b78c506ccf8b954f27903ba3812d2638fa6719930db89 WatchSource:0}: Error finding container 008b7d52475b33b1129b78c506ccf8b954f27903ba3812d2638fa6719930db89: Status 404 returned error can't find the container with id 008b7d52475b33b1129b78c506ccf8b954f27903ba3812d2638fa6719930db89 Nov 21 14:16:24 crc kubenswrapper[5133]: I1121 14:16:24.624924 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" event={"ID":"0c99cd14-85e0-4b44-ace2-8a624ff98e60","Type":"ContainerStarted","Data":"008b7d52475b33b1129b78c506ccf8b954f27903ba3812d2638fa6719930db89"} Nov 21 14:16:25 crc kubenswrapper[5133]: I1121 14:16:25.639522 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" event={"ID":"0c99cd14-85e0-4b44-ace2-8a624ff98e60","Type":"ContainerStarted","Data":"64d4679b63c273b6c23751780d89e8e0b0c2d539414f1f7b6d6e4885141cfe6e"} Nov 21 14:16:25 crc kubenswrapper[5133]: I1121 14:16:25.654992 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" podStartSLOduration=2.203215675 podStartE2EDuration="2.654978037s" podCreationTimestamp="2025-11-21 14:16:23 +0000 UTC" firstStartedPulling="2025-11-21 14:16:24.612126704 +0000 UTC m=+2044.409958952" lastFinishedPulling="2025-11-21 14:16:25.063889056 +0000 UTC m=+2044.861721314" observedRunningTime="2025-11-21 14:16:25.653377013 +0000 UTC m=+2045.451209261" watchObservedRunningTime="2025-11-21 14:16:25.654978037 +0000 UTC 
m=+2045.452810285" Nov 21 14:16:35 crc kubenswrapper[5133]: I1121 14:16:35.759143 5133 generic.go:334] "Generic (PLEG): container finished" podID="0c99cd14-85e0-4b44-ace2-8a624ff98e60" containerID="64d4679b63c273b6c23751780d89e8e0b0c2d539414f1f7b6d6e4885141cfe6e" exitCode=0 Nov 21 14:16:35 crc kubenswrapper[5133]: I1121 14:16:35.759273 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" event={"ID":"0c99cd14-85e0-4b44-ace2-8a624ff98e60","Type":"ContainerDied","Data":"64d4679b63c273b6c23751780d89e8e0b0c2d539414f1f7b6d6e4885141cfe6e"} Nov 21 14:16:36 crc kubenswrapper[5133]: I1121 14:16:36.867616 5133 scope.go:117] "RemoveContainer" containerID="958ef6915ad5e8b47995172884a4e2caea9f1e9225533aafc54e62a38b8b9e98" Nov 21 14:16:36 crc kubenswrapper[5133]: I1121 14:16:36.933098 5133 scope.go:117] "RemoveContainer" containerID="0f599c0d88facb1c35cf520e99b361a218cd88f867257177221f74b93266befd" Nov 21 14:16:36 crc kubenswrapper[5133]: I1121 14:16:36.982158 5133 scope.go:117] "RemoveContainer" containerID="6027cdfe5863f852ffa876e53682937454491f8d55703dfcdd48068add898d82" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.307801 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.395898 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xz8dd\" (UniqueName: \"kubernetes.io/projected/0c99cd14-85e0-4b44-ace2-8a624ff98e60-kube-api-access-xz8dd\") pod \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.395995 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-inventory\") pod \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.396129 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-ssh-key\") pod \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\" (UID: \"0c99cd14-85e0-4b44-ace2-8a624ff98e60\") " Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.404484 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c99cd14-85e0-4b44-ace2-8a624ff98e60-kube-api-access-xz8dd" (OuterVolumeSpecName: "kube-api-access-xz8dd") pod "0c99cd14-85e0-4b44-ace2-8a624ff98e60" (UID: "0c99cd14-85e0-4b44-ace2-8a624ff98e60"). InnerVolumeSpecName "kube-api-access-xz8dd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.430473 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0c99cd14-85e0-4b44-ace2-8a624ff98e60" (UID: "0c99cd14-85e0-4b44-ace2-8a624ff98e60"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.430526 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-inventory" (OuterVolumeSpecName: "inventory") pod "0c99cd14-85e0-4b44-ace2-8a624ff98e60" (UID: "0c99cd14-85e0-4b44-ace2-8a624ff98e60"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.498893 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xz8dd\" (UniqueName: \"kubernetes.io/projected/0c99cd14-85e0-4b44-ace2-8a624ff98e60-kube-api-access-xz8dd\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.498941 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.498955 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0c99cd14-85e0-4b44-ace2-8a624ff98e60-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.782877 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" event={"ID":"0c99cd14-85e0-4b44-ace2-8a624ff98e60","Type":"ContainerDied","Data":"008b7d52475b33b1129b78c506ccf8b954f27903ba3812d2638fa6719930db89"} Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.782923 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="008b7d52475b33b1129b78c506ccf8b954f27903ba3812d2638fa6719930db89" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.782985 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.844891 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b"] Nov 21 14:16:37 crc kubenswrapper[5133]: E1121 14:16:37.845272 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c99cd14-85e0-4b44-ace2-8a624ff98e60" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.845289 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c99cd14-85e0-4b44-ace2-8a624ff98e60" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.845450 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c99cd14-85e0-4b44-ace2-8a624ff98e60" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.846020 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.848378 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.848524 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.848673 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.851660 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.860489 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b"] Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.908279 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.908323 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-468lw\" (UniqueName: \"kubernetes.io/projected/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-kube-api-access-468lw\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:37 crc kubenswrapper[5133]: I1121 14:16:37.908386 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:38 crc kubenswrapper[5133]: I1121 14:16:38.010434 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:38 crc kubenswrapper[5133]: I1121 14:16:38.010590 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:38 crc kubenswrapper[5133]: I1121 14:16:38.010617 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-468lw\" (UniqueName: \"kubernetes.io/projected/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-kube-api-access-468lw\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b\" (UID: 
\"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:38 crc kubenswrapper[5133]: I1121 14:16:38.016982 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:38 crc kubenswrapper[5133]: I1121 14:16:38.022474 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:38 crc kubenswrapper[5133]: I1121 14:16:38.028173 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-468lw\" (UniqueName: \"kubernetes.io/projected/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-kube-api-access-468lw\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:38 crc kubenswrapper[5133]: I1121 14:16:38.181099 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:38 crc kubenswrapper[5133]: I1121 14:16:38.516302 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b"] Nov 21 14:16:38 crc kubenswrapper[5133]: W1121 14:16:38.536925 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb8ce05b_7db5_4f97_8f82_93a7f9c00c83.slice/crio-706fe0cf5d5e01fa9520e2006205253e38813b7948f7a969aa3da8460a90c3dd WatchSource:0}: Error finding container 706fe0cf5d5e01fa9520e2006205253e38813b7948f7a969aa3da8460a90c3dd: Status 404 returned error can't find the container with id 706fe0cf5d5e01fa9520e2006205253e38813b7948f7a969aa3da8460a90c3dd Nov 21 14:16:38 crc kubenswrapper[5133]: I1121 14:16:38.793310 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" event={"ID":"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83","Type":"ContainerStarted","Data":"706fe0cf5d5e01fa9520e2006205253e38813b7948f7a969aa3da8460a90c3dd"} Nov 21 14:16:39 crc kubenswrapper[5133]: I1121 14:16:39.828031 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" event={"ID":"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83","Type":"ContainerStarted","Data":"0a4bbd90d94eb25cb896ecd2edd0ab360894c487cac140d31208295287d3dce4"} Nov 21 14:16:39 crc kubenswrapper[5133]: I1121 14:16:39.866984 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" podStartSLOduration=2.3844289659999998 podStartE2EDuration="2.866955556s" podCreationTimestamp="2025-11-21 14:16:37 +0000 UTC" firstStartedPulling="2025-11-21 14:16:38.540032142 +0000 UTC m=+2058.337864400" lastFinishedPulling="2025-11-21 14:16:39.022558742 +0000 UTC m=+2058.820390990" observedRunningTime="2025-11-21 14:16:39.855262441 +0000 UTC 
m=+2059.653094709" watchObservedRunningTime="2025-11-21 14:16:39.866955556 +0000 UTC m=+2059.664787824" Nov 21 14:16:49 crc kubenswrapper[5133]: I1121 14:16:49.937606 5133 generic.go:334] "Generic (PLEG): container finished" podID="cb8ce05b-7db5-4f97-8f82-93a7f9c00c83" containerID="0a4bbd90d94eb25cb896ecd2edd0ab360894c487cac140d31208295287d3dce4" exitCode=0 Nov 21 14:16:49 crc kubenswrapper[5133]: I1121 14:16:49.937724 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" event={"ID":"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83","Type":"ContainerDied","Data":"0a4bbd90d94eb25cb896ecd2edd0ab360894c487cac140d31208295287d3dce4"} Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.362856 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.411903 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-468lw\" (UniqueName: \"kubernetes.io/projected/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-kube-api-access-468lw\") pod \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.412089 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-inventory\") pod \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.412382 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-ssh-key\") pod \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\" (UID: \"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83\") " Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.418200 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-kube-api-access-468lw" (OuterVolumeSpecName: "kube-api-access-468lw") pod "cb8ce05b-7db5-4f97-8f82-93a7f9c00c83" (UID: "cb8ce05b-7db5-4f97-8f82-93a7f9c00c83"). InnerVolumeSpecName "kube-api-access-468lw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.453878 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cb8ce05b-7db5-4f97-8f82-93a7f9c00c83" (UID: "cb8ce05b-7db5-4f97-8f82-93a7f9c00c83"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.457940 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-inventory" (OuterVolumeSpecName: "inventory") pod "cb8ce05b-7db5-4f97-8f82-93a7f9c00c83" (UID: "cb8ce05b-7db5-4f97-8f82-93a7f9c00c83"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.517121 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.517156 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-468lw\" (UniqueName: \"kubernetes.io/projected/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-kube-api-access-468lw\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.517172 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.958405 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" event={"ID":"cb8ce05b-7db5-4f97-8f82-93a7f9c00c83","Type":"ContainerDied","Data":"706fe0cf5d5e01fa9520e2006205253e38813b7948f7a969aa3da8460a90c3dd"} Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.958443 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="706fe0cf5d5e01fa9520e2006205253e38813b7948f7a969aa3da8460a90c3dd" Nov 21 14:16:51 crc kubenswrapper[5133]: I1121 14:16:51.958501 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b" Nov 21 14:17:02 crc kubenswrapper[5133]: I1121 14:17:02.047686 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-mh8s9"] Nov 21 14:17:02 crc kubenswrapper[5133]: I1121 14:17:02.054462 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-mh8s9"] Nov 21 14:17:02 crc kubenswrapper[5133]: I1121 14:17:02.490721 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3618a361-7c80-4f07-9375-4753adce457f" path="/var/lib/kubelet/pods/3618a361-7c80-4f07-9375-4753adce457f/volumes" Nov 21 14:17:23 crc kubenswrapper[5133]: I1121 14:17:23.310711 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:17:23 crc kubenswrapper[5133]: I1121 14:17:23.311340 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:17:37 crc kubenswrapper[5133]: I1121 14:17:37.117232 5133 scope.go:117] "RemoveContainer" containerID="5b2e2955233f1622eb06eaf249125bf1d84e07a6dd1d87f2418254a89bb31cb1" Nov 21 14:17:53 crc kubenswrapper[5133]: I1121 14:17:53.311110 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:17:53 crc kubenswrapper[5133]: I1121 14:17:53.312192 5133 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:18:23 crc kubenswrapper[5133]: I1121 14:18:23.311142 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:18:23 crc kubenswrapper[5133]: I1121 14:18:23.313351 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:18:23 crc kubenswrapper[5133]: I1121 14:18:23.313584 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:18:23 crc kubenswrapper[5133]: I1121 14:18:23.314967 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2906b97bc9f85b4f70f32fefe8fea2cab9f1e58520d8b56a7cb015384ab82031"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:18:23 crc kubenswrapper[5133]: I1121 14:18:23.315339 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://2906b97bc9f85b4f70f32fefe8fea2cab9f1e58520d8b56a7cb015384ab82031" gracePeriod=600 Nov 21 14:18:23 crc kubenswrapper[5133]: I1121 14:18:23.968599 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="2906b97bc9f85b4f70f32fefe8fea2cab9f1e58520d8b56a7cb015384ab82031" exitCode=0 Nov 21 14:18:23 crc kubenswrapper[5133]: I1121 14:18:23.968653 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"2906b97bc9f85b4f70f32fefe8fea2cab9f1e58520d8b56a7cb015384ab82031"} Nov 21 14:18:23 crc kubenswrapper[5133]: I1121 14:18:23.969152 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22"} Nov 21 14:18:23 crc kubenswrapper[5133]: I1121 14:18:23.969177 5133 scope.go:117] "RemoveContainer" containerID="532d197f04bffdd4c14ef24edeeddc3a83f198653923e9096885d7f28ddecf77" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.483353 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qg4k6"] Nov 21 14:18:46 crc kubenswrapper[5133]: E1121 14:18:46.490276 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb8ce05b-7db5-4f97-8f82-93a7f9c00c83" 
containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.490488 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb8ce05b-7db5-4f97-8f82-93a7f9c00c83" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.490820 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb8ce05b-7db5-4f97-8f82-93a7f9c00c83" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.492572 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.504302 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qg4k6"] Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.618177 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-utilities\") pod \"certified-operators-qg4k6\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.618530 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-catalog-content\") pod \"certified-operators-qg4k6\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.618561 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xj95\" (UniqueName: \"kubernetes.io/projected/0b3a655b-251b-4129-a1e3-bce83f765c83-kube-api-access-7xj95\") pod \"certified-operators-qg4k6\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.720223 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-utilities\") pod \"certified-operators-qg4k6\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.720350 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-catalog-content\") pod \"certified-operators-qg4k6\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.720379 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xj95\" (UniqueName: \"kubernetes.io/projected/0b3a655b-251b-4129-a1e3-bce83f765c83-kube-api-access-7xj95\") pod \"certified-operators-qg4k6\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.721308 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-utilities\") pod \"certified-operators-qg4k6\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.721529 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-catalog-content\") pod \"certified-operators-qg4k6\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.744589 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xj95\" (UniqueName: \"kubernetes.io/projected/0b3a655b-251b-4129-a1e3-bce83f765c83-kube-api-access-7xj95\") pod \"certified-operators-qg4k6\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:46 crc kubenswrapper[5133]: I1121 14:18:46.812055 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:47 crc kubenswrapper[5133]: I1121 14:18:47.366194 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qg4k6"] Nov 21 14:18:47 crc kubenswrapper[5133]: W1121 14:18:47.373110 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b3a655b_251b_4129_a1e3_bce83f765c83.slice/crio-4daac0999746ead2027fe3558b90b1f00ed39bd4708a8f1b4160c9cb22930395 WatchSource:0}: Error finding container 4daac0999746ead2027fe3558b90b1f00ed39bd4708a8f1b4160c9cb22930395: Status 404 returned error can't find the container with id 4daac0999746ead2027fe3558b90b1f00ed39bd4708a8f1b4160c9cb22930395 Nov 21 14:18:48 crc kubenswrapper[5133]: I1121 14:18:48.210303 5133 generic.go:334] "Generic (PLEG): container finished" podID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerID="3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892" exitCode=0 Nov 21 14:18:48 crc kubenswrapper[5133]: I1121 14:18:48.210570 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg4k6" event={"ID":"0b3a655b-251b-4129-a1e3-bce83f765c83","Type":"ContainerDied","Data":"3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892"} Nov 21 14:18:48 crc kubenswrapper[5133]: I1121 14:18:48.212189 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg4k6" event={"ID":"0b3a655b-251b-4129-a1e3-bce83f765c83","Type":"ContainerStarted","Data":"4daac0999746ead2027fe3558b90b1f00ed39bd4708a8f1b4160c9cb22930395"} Nov 21 14:18:48 crc kubenswrapper[5133]: I1121 14:18:48.213555 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:18:51 crc kubenswrapper[5133]: I1121 14:18:51.269754 5133 generic.go:334] "Generic (PLEG): container finished" podID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerID="941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b" exitCode=0 Nov 21 14:18:51 crc kubenswrapper[5133]: I1121 14:18:51.270556 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg4k6" event={"ID":"0b3a655b-251b-4129-a1e3-bce83f765c83","Type":"ContainerDied","Data":"941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b"} Nov 
21 14:18:55 crc kubenswrapper[5133]: I1121 14:18:55.313234 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg4k6" event={"ID":"0b3a655b-251b-4129-a1e3-bce83f765c83","Type":"ContainerStarted","Data":"fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501"} Nov 21 14:18:55 crc kubenswrapper[5133]: I1121 14:18:55.348564 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qg4k6" podStartSLOduration=3.460602239 podStartE2EDuration="9.3485283s" podCreationTimestamp="2025-11-21 14:18:46 +0000 UTC" firstStartedPulling="2025-11-21 14:18:48.213305111 +0000 UTC m=+2188.011137349" lastFinishedPulling="2025-11-21 14:18:54.101231142 +0000 UTC m=+2193.899063410" observedRunningTime="2025-11-21 14:18:55.333485084 +0000 UTC m=+2195.131317392" watchObservedRunningTime="2025-11-21 14:18:55.3485283 +0000 UTC m=+2195.146360588" Nov 21 14:18:56 crc kubenswrapper[5133]: I1121 14:18:56.813210 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:56 crc kubenswrapper[5133]: I1121 14:18:56.813621 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:18:56 crc kubenswrapper[5133]: I1121 14:18:56.889990 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:19:06 crc kubenswrapper[5133]: I1121 14:19:06.899252 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:19:07 crc kubenswrapper[5133]: I1121 14:19:07.000649 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qg4k6"] Nov 21 14:19:07 crc kubenswrapper[5133]: I1121 14:19:07.892542 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qg4k6" podUID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerName="registry-server" containerID="cri-o://fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501" gracePeriod=2 Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.370319 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.510695 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-utilities\") pod \"0b3a655b-251b-4129-a1e3-bce83f765c83\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.510791 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-catalog-content\") pod \"0b3a655b-251b-4129-a1e3-bce83f765c83\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.510816 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xj95\" (UniqueName: \"kubernetes.io/projected/0b3a655b-251b-4129-a1e3-bce83f765c83-kube-api-access-7xj95\") pod \"0b3a655b-251b-4129-a1e3-bce83f765c83\" (UID: \"0b3a655b-251b-4129-a1e3-bce83f765c83\") " Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.511662 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-utilities" (OuterVolumeSpecName: "utilities") pod "0b3a655b-251b-4129-a1e3-bce83f765c83" (UID: "0b3a655b-251b-4129-a1e3-bce83f765c83"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.518613 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b3a655b-251b-4129-a1e3-bce83f765c83-kube-api-access-7xj95" (OuterVolumeSpecName: "kube-api-access-7xj95") pod "0b3a655b-251b-4129-a1e3-bce83f765c83" (UID: "0b3a655b-251b-4129-a1e3-bce83f765c83"). InnerVolumeSpecName "kube-api-access-7xj95". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.581646 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b3a655b-251b-4129-a1e3-bce83f765c83" (UID: "0b3a655b-251b-4129-a1e3-bce83f765c83"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.612984 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.613046 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b3a655b-251b-4129-a1e3-bce83f765c83-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.613063 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xj95\" (UniqueName: \"kubernetes.io/projected/0b3a655b-251b-4129-a1e3-bce83f765c83-kube-api-access-7xj95\") on node \"crc\" DevicePath \"\"" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.903071 5133 generic.go:334] "Generic (PLEG): container finished" podID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerID="fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501" exitCode=0 Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.903129 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg4k6" event={"ID":"0b3a655b-251b-4129-a1e3-bce83f765c83","Type":"ContainerDied","Data":"fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501"} Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.903177 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qg4k6" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.903200 5133 scope.go:117] "RemoveContainer" containerID="fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.903188 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qg4k6" event={"ID":"0b3a655b-251b-4129-a1e3-bce83f765c83","Type":"ContainerDied","Data":"4daac0999746ead2027fe3558b90b1f00ed39bd4708a8f1b4160c9cb22930395"} Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.929248 5133 scope.go:117] "RemoveContainer" containerID="941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.964294 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qg4k6"] Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.975919 5133 scope.go:117] "RemoveContainer" containerID="3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892" Nov 21 14:19:08 crc kubenswrapper[5133]: I1121 14:19:08.976541 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qg4k6"] Nov 21 14:19:09 crc kubenswrapper[5133]: I1121 14:19:09.009272 5133 scope.go:117] "RemoveContainer" containerID="fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501" Nov 21 14:19:09 crc kubenswrapper[5133]: E1121 14:19:09.009747 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501\": container with ID starting with fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501 not found: ID does not exist" containerID="fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501" Nov 21 14:19:09 crc kubenswrapper[5133]: I1121 14:19:09.009810 
5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501"} err="failed to get container status \"fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501\": rpc error: code = NotFound desc = could not find container \"fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501\": container with ID starting with fa06dcd950c9744ba7b0398dfc85cf6c2079a5fa421aca92c3c05659e0f88501 not found: ID does not exist" Nov 21 14:19:09 crc kubenswrapper[5133]: I1121 14:19:09.009854 5133 scope.go:117] "RemoveContainer" containerID="941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b" Nov 21 14:19:09 crc kubenswrapper[5133]: E1121 14:19:09.010380 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b\": container with ID starting with 941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b not found: ID does not exist" containerID="941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b" Nov 21 14:19:09 crc kubenswrapper[5133]: I1121 14:19:09.010410 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b"} err="failed to get container status \"941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b\": rpc error: code = NotFound desc = could not find container \"941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b\": container with ID starting with 941fbb8934b23065ee86476a9e8be1b9126aac27a9ab2f35d7ef16f20264981b not found: ID does not exist" Nov 21 14:19:09 crc kubenswrapper[5133]: I1121 14:19:09.010430 5133 scope.go:117] "RemoveContainer" containerID="3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892" Nov 21 14:19:09 crc kubenswrapper[5133]: E1121 14:19:09.010800 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892\": container with ID starting with 3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892 not found: ID does not exist" containerID="3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892" Nov 21 14:19:09 crc kubenswrapper[5133]: I1121 14:19:09.010834 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892"} err="failed to get container status \"3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892\": rpc error: code = NotFound desc = could not find container \"3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892\": container with ID starting with 3b326575b7b9691714ffd8872ec4712968b7554c45d77fa5fab7f0b05e903892 not found: ID does not exist" Nov 21 14:19:10 crc kubenswrapper[5133]: I1121 14:19:10.480670 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b3a655b-251b-4129-a1e3-bce83f765c83" path="/var/lib/kubelet/pods/0b3a655b-251b-4129-a1e3-bce83f765c83/volumes" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.487620 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jx6qq"] Nov 21 14:20:00 crc kubenswrapper[5133]: E1121 14:20:00.489198 5133 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerName="registry-server" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.489224 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerName="registry-server" Nov 21 14:20:00 crc kubenswrapper[5133]: E1121 14:20:00.489257 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerName="extract-utilities" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.489272 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerName="extract-utilities" Nov 21 14:20:00 crc kubenswrapper[5133]: E1121 14:20:00.489357 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerName="extract-content" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.489373 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerName="extract-content" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.489741 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b3a655b-251b-4129-a1e3-bce83f765c83" containerName="registry-server" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.498409 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.503214 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jx6qq"] Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.601700 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbhgj\" (UniqueName: \"kubernetes.io/projected/f71b3019-e5cd-4056-8a0b-ac77eac900e9-kube-api-access-jbhgj\") pod \"redhat-marketplace-jx6qq\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.602572 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-utilities\") pod \"redhat-marketplace-jx6qq\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.602934 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-catalog-content\") pod \"redhat-marketplace-jx6qq\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.705103 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-utilities\") pod \"redhat-marketplace-jx6qq\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.705242 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-catalog-content\") pod \"redhat-marketplace-jx6qq\" 
(UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.705437 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbhgj\" (UniqueName: \"kubernetes.io/projected/f71b3019-e5cd-4056-8a0b-ac77eac900e9-kube-api-access-jbhgj\") pod \"redhat-marketplace-jx6qq\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.705885 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-utilities\") pod \"redhat-marketplace-jx6qq\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.706691 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-catalog-content\") pod \"redhat-marketplace-jx6qq\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.741407 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbhgj\" (UniqueName: \"kubernetes.io/projected/f71b3019-e5cd-4056-8a0b-ac77eac900e9-kube-api-access-jbhgj\") pod \"redhat-marketplace-jx6qq\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:00 crc kubenswrapper[5133]: I1121 14:20:00.825497 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:01 crc kubenswrapper[5133]: I1121 14:20:01.309967 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jx6qq"] Nov 21 14:20:01 crc kubenswrapper[5133]: I1121 14:20:01.545106 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jx6qq" event={"ID":"f71b3019-e5cd-4056-8a0b-ac77eac900e9","Type":"ContainerStarted","Data":"2efb975f4a30a6566b8f27f7fa9d67a21efd7aafbddcde370cce1155e37c2c0b"} Nov 21 14:20:02 crc kubenswrapper[5133]: I1121 14:20:02.555229 5133 generic.go:334] "Generic (PLEG): container finished" podID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerID="91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac" exitCode=0 Nov 21 14:20:02 crc kubenswrapper[5133]: I1121 14:20:02.555282 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jx6qq" event={"ID":"f71b3019-e5cd-4056-8a0b-ac77eac900e9","Type":"ContainerDied","Data":"91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac"} Nov 21 14:20:03 crc kubenswrapper[5133]: I1121 14:20:03.566274 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jx6qq" event={"ID":"f71b3019-e5cd-4056-8a0b-ac77eac900e9","Type":"ContainerStarted","Data":"87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf"} Nov 21 14:20:04 crc kubenswrapper[5133]: I1121 14:20:04.576331 5133 generic.go:334] "Generic (PLEG): container finished" podID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerID="87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf" exitCode=0 Nov 21 14:20:04 crc kubenswrapper[5133]: I1121 14:20:04.576468 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jx6qq" event={"ID":"f71b3019-e5cd-4056-8a0b-ac77eac900e9","Type":"ContainerDied","Data":"87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf"} Nov 21 14:20:05 crc kubenswrapper[5133]: I1121 14:20:05.586860 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jx6qq" event={"ID":"f71b3019-e5cd-4056-8a0b-ac77eac900e9","Type":"ContainerStarted","Data":"6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263"} Nov 21 14:20:05 crc kubenswrapper[5133]: I1121 14:20:05.625221 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jx6qq" podStartSLOduration=3.172127034 podStartE2EDuration="5.625188441s" podCreationTimestamp="2025-11-21 14:20:00 +0000 UTC" firstStartedPulling="2025-11-21 14:20:02.558577532 +0000 UTC m=+2262.356409780" lastFinishedPulling="2025-11-21 14:20:05.011638939 +0000 UTC m=+2264.809471187" observedRunningTime="2025-11-21 14:20:05.619445456 +0000 UTC m=+2265.417277764" watchObservedRunningTime="2025-11-21 14:20:05.625188441 +0000 UTC m=+2265.423020729" Nov 21 14:20:10 crc kubenswrapper[5133]: I1121 14:20:10.826383 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:10 crc kubenswrapper[5133]: I1121 14:20:10.826952 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:10 crc kubenswrapper[5133]: I1121 14:20:10.875272 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:11 crc kubenswrapper[5133]: I1121 14:20:11.724629 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:11 crc kubenswrapper[5133]: I1121 14:20:11.807244 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jx6qq"] Nov 21 14:20:13 crc kubenswrapper[5133]: I1121 14:20:13.661977 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jx6qq" podUID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerName="registry-server" containerID="cri-o://6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263" gracePeriod=2 Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.144481 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.275156 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-utilities\") pod \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.275525 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-catalog-content\") pod \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.275573 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbhgj\" (UniqueName: \"kubernetes.io/projected/f71b3019-e5cd-4056-8a0b-ac77eac900e9-kube-api-access-jbhgj\") pod \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\" (UID: \"f71b3019-e5cd-4056-8a0b-ac77eac900e9\") " Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.276570 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-utilities" (OuterVolumeSpecName: "utilities") pod "f71b3019-e5cd-4056-8a0b-ac77eac900e9" (UID: "f71b3019-e5cd-4056-8a0b-ac77eac900e9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.281611 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f71b3019-e5cd-4056-8a0b-ac77eac900e9-kube-api-access-jbhgj" (OuterVolumeSpecName: "kube-api-access-jbhgj") pod "f71b3019-e5cd-4056-8a0b-ac77eac900e9" (UID: "f71b3019-e5cd-4056-8a0b-ac77eac900e9"). InnerVolumeSpecName "kube-api-access-jbhgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.295478 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f71b3019-e5cd-4056-8a0b-ac77eac900e9" (UID: "f71b3019-e5cd-4056-8a0b-ac77eac900e9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.378225 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.378268 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f71b3019-e5cd-4056-8a0b-ac77eac900e9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.378286 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbhgj\" (UniqueName: \"kubernetes.io/projected/f71b3019-e5cd-4056-8a0b-ac77eac900e9-kube-api-access-jbhgj\") on node \"crc\" DevicePath \"\"" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.679057 5133 generic.go:334] "Generic (PLEG): container finished" podID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerID="6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263" exitCode=0 Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.679124 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jx6qq" event={"ID":"f71b3019-e5cd-4056-8a0b-ac77eac900e9","Type":"ContainerDied","Data":"6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263"} Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.679548 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jx6qq" event={"ID":"f71b3019-e5cd-4056-8a0b-ac77eac900e9","Type":"ContainerDied","Data":"2efb975f4a30a6566b8f27f7fa9d67a21efd7aafbddcde370cce1155e37c2c0b"} Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.679189 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jx6qq" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.679597 5133 scope.go:117] "RemoveContainer" containerID="6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.708614 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jx6qq"] Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.716822 5133 scope.go:117] "RemoveContainer" containerID="87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.721704 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jx6qq"] Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.740666 5133 scope.go:117] "RemoveContainer" containerID="91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.790947 5133 scope.go:117] "RemoveContainer" containerID="6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263" Nov 21 14:20:14 crc kubenswrapper[5133]: E1121 14:20:14.791711 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263\": container with ID starting with 6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263 not found: ID does not exist" containerID="6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.791763 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263"} err="failed to get container status \"6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263\": rpc error: code = NotFound desc = could not find container \"6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263\": container with ID starting with 6f2c6384359ecf968450dd4c2b4bfc8c8fe1dcc4daf464adce468fa4ac14e263 not found: ID does not exist" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.791797 5133 scope.go:117] "RemoveContainer" containerID="87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf" Nov 21 14:20:14 crc kubenswrapper[5133]: E1121 14:20:14.792310 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf\": container with ID starting with 87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf not found: ID does not exist" containerID="87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.792341 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf"} err="failed to get container status \"87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf\": rpc error: code = NotFound desc = could not find container \"87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf\": container with ID starting with 87aed16d4c15b296a3ed8e23493f56d6feb57f09eb28241c1030da97e148b0cf not found: ID does not exist" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.792373 5133 scope.go:117] "RemoveContainer" 
containerID="91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac" Nov 21 14:20:14 crc kubenswrapper[5133]: E1121 14:20:14.792815 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac\": container with ID starting with 91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac not found: ID does not exist" containerID="91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac" Nov 21 14:20:14 crc kubenswrapper[5133]: I1121 14:20:14.792853 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac"} err="failed to get container status \"91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac\": rpc error: code = NotFound desc = could not find container \"91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac\": container with ID starting with 91a259d162002282e7b484a0a12df302b7ecedb424a20142e9d56a0959e2ffac not found: ID does not exist" Nov 21 14:20:16 crc kubenswrapper[5133]: I1121 14:20:16.474657 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" path="/var/lib/kubelet/pods/f71b3019-e5cd-4056-8a0b-ac77eac900e9/volumes" Nov 21 14:20:23 crc kubenswrapper[5133]: I1121 14:20:23.310527 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:20:23 crc kubenswrapper[5133]: I1121 14:20:23.311218 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:20:53 crc kubenswrapper[5133]: I1121 14:20:53.310962 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:20:53 crc kubenswrapper[5133]: I1121 14:20:53.311946 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:21:23 crc kubenswrapper[5133]: I1121 14:21:23.311561 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:21:23 crc kubenswrapper[5133]: I1121 14:21:23.312191 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:21:23 crc kubenswrapper[5133]: I1121 14:21:23.312246 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:21:23 crc kubenswrapper[5133]: I1121 14:21:23.314479 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:21:23 crc kubenswrapper[5133]: I1121 14:21:23.314686 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" gracePeriod=600 Nov 21 14:21:23 crc kubenswrapper[5133]: E1121 14:21:23.449611 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:21:23 crc kubenswrapper[5133]: I1121 14:21:23.463956 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" exitCode=0 Nov 21 14:21:23 crc kubenswrapper[5133]: I1121 14:21:23.464046 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22"} Nov 21 14:21:23 crc kubenswrapper[5133]: I1121 14:21:23.464109 5133 scope.go:117] "RemoveContainer" containerID="2906b97bc9f85b4f70f32fefe8fea2cab9f1e58520d8b56a7cb015384ab82031" Nov 21 14:21:23 crc kubenswrapper[5133]: I1121 14:21:23.465528 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:21:23 crc kubenswrapper[5133]: E1121 14:21:23.466454 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:21:36 crc kubenswrapper[5133]: I1121 14:21:36.459207 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:21:36 crc kubenswrapper[5133]: E1121 14:21:36.460537 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:21:51 crc kubenswrapper[5133]: I1121 14:21:51.457872 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:21:51 crc kubenswrapper[5133]: E1121 14:21:51.458857 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:22:06 crc kubenswrapper[5133]: I1121 14:22:06.457966 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:22:06 crc kubenswrapper[5133]: E1121 14:22:06.458853 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:22:20 crc kubenswrapper[5133]: I1121 14:22:20.458297 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:22:20 crc kubenswrapper[5133]: E1121 14:22:20.459236 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:22:33 crc kubenswrapper[5133]: I1121 14:22:33.458491 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:22:33 crc kubenswrapper[5133]: E1121 14:22:33.459775 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:22:47 crc kubenswrapper[5133]: I1121 14:22:47.458733 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:22:47 crc kubenswrapper[5133]: E1121 14:22:47.460352 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" 
podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:22:58 crc kubenswrapper[5133]: I1121 14:22:58.458042 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:22:58 crc kubenswrapper[5133]: E1121 14:22:58.459169 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:23:10 crc kubenswrapper[5133]: I1121 14:23:10.457807 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:23:10 crc kubenswrapper[5133]: E1121 14:23:10.458666 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:23:24 crc kubenswrapper[5133]: I1121 14:23:24.458193 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:23:24 crc kubenswrapper[5133]: E1121 14:23:24.459207 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:23:38 crc kubenswrapper[5133]: I1121 14:23:38.459081 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:23:38 crc kubenswrapper[5133]: E1121 14:23:38.459765 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:23:49 crc kubenswrapper[5133]: I1121 14:23:49.457226 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:23:49 crc kubenswrapper[5133]: E1121 14:23:49.458065 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.585633 5133 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-9w6lg"] Nov 21 14:23:57 crc kubenswrapper[5133]: E1121 14:23:57.586553 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerName="registry-server" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.586565 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerName="registry-server" Nov 21 14:23:57 crc kubenswrapper[5133]: E1121 14:23:57.586576 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerName="extract-utilities" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.586585 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerName="extract-utilities" Nov 21 14:23:57 crc kubenswrapper[5133]: E1121 14:23:57.586612 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerName="extract-content" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.586618 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerName="extract-content" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.586777 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="f71b3019-e5cd-4056-8a0b-ac77eac900e9" containerName="registry-server" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.588035 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.612455 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9w6lg"] Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.736517 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thqf2\" (UniqueName: \"kubernetes.io/projected/a1149747-3645-4424-860e-7915422a24b5-kube-api-access-thqf2\") pod \"redhat-operators-9w6lg\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.736666 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-catalog-content\") pod \"redhat-operators-9w6lg\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.736732 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-utilities\") pod \"redhat-operators-9w6lg\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.838273 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-catalog-content\") pod \"redhat-operators-9w6lg\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.838378 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-utilities\") pod \"redhat-operators-9w6lg\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.838443 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thqf2\" (UniqueName: \"kubernetes.io/projected/a1149747-3645-4424-860e-7915422a24b5-kube-api-access-thqf2\") pod \"redhat-operators-9w6lg\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.839230 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-catalog-content\") pod \"redhat-operators-9w6lg\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.839481 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-utilities\") pod \"redhat-operators-9w6lg\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.862540 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thqf2\" (UniqueName: \"kubernetes.io/projected/a1149747-3645-4424-860e-7915422a24b5-kube-api-access-thqf2\") pod \"redhat-operators-9w6lg\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:57 crc kubenswrapper[5133]: I1121 14:23:57.956729 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:23:58 crc kubenswrapper[5133]: I1121 14:23:58.447968 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9w6lg"] Nov 21 14:23:59 crc kubenswrapper[5133]: I1121 14:23:59.128157 5133 generic.go:334] "Generic (PLEG): container finished" podID="a1149747-3645-4424-860e-7915422a24b5" containerID="e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b" exitCode=0 Nov 21 14:23:59 crc kubenswrapper[5133]: I1121 14:23:59.128263 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9w6lg" event={"ID":"a1149747-3645-4424-860e-7915422a24b5","Type":"ContainerDied","Data":"e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b"} Nov 21 14:23:59 crc kubenswrapper[5133]: I1121 14:23:59.128612 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9w6lg" event={"ID":"a1149747-3645-4424-860e-7915422a24b5","Type":"ContainerStarted","Data":"cfc68d7b3558a2d202038328ee96dc11f3f090872917aacbf875ab7d95267e00"} Nov 21 14:23:59 crc kubenswrapper[5133]: I1121 14:23:59.130445 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:24:01 crc kubenswrapper[5133]: I1121 14:24:01.458324 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:24:01 crc kubenswrapper[5133]: E1121 14:24:01.459194 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:24:03 crc kubenswrapper[5133]: I1121 14:24:03.180205 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9w6lg" event={"ID":"a1149747-3645-4424-860e-7915422a24b5","Type":"ContainerStarted","Data":"2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757"} Nov 21 14:24:03 crc kubenswrapper[5133]: E1121 14:24:03.507478 5133 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1149747_3645_4424_860e_7915422a24b5.slice/crio-2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757.scope\": RecentStats: unable to find data in memory cache]" Nov 21 14:24:04 crc kubenswrapper[5133]: I1121 14:24:04.190501 5133 generic.go:334] "Generic (PLEG): container finished" podID="a1149747-3645-4424-860e-7915422a24b5" containerID="2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757" exitCode=0 Nov 21 14:24:04 crc kubenswrapper[5133]: I1121 14:24:04.190556 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9w6lg" event={"ID":"a1149747-3645-4424-860e-7915422a24b5","Type":"ContainerDied","Data":"2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757"} Nov 21 14:24:06 crc kubenswrapper[5133]: I1121 14:24:06.213340 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9w6lg" 
event={"ID":"a1149747-3645-4424-860e-7915422a24b5","Type":"ContainerStarted","Data":"afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458"} Nov 21 14:24:06 crc kubenswrapper[5133]: I1121 14:24:06.242581 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9w6lg" podStartSLOduration=3.299163321 podStartE2EDuration="9.242556329s" podCreationTimestamp="2025-11-21 14:23:57 +0000 UTC" firstStartedPulling="2025-11-21 14:23:59.130242268 +0000 UTC m=+2498.928074516" lastFinishedPulling="2025-11-21 14:24:05.073635236 +0000 UTC m=+2504.871467524" observedRunningTime="2025-11-21 14:24:06.231905982 +0000 UTC m=+2506.029738240" watchObservedRunningTime="2025-11-21 14:24:06.242556329 +0000 UTC m=+2506.040388587" Nov 21 14:24:07 crc kubenswrapper[5133]: I1121 14:24:07.957569 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:24:07 crc kubenswrapper[5133]: I1121 14:24:07.958846 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:24:09 crc kubenswrapper[5133]: I1121 14:24:09.024451 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9w6lg" podUID="a1149747-3645-4424-860e-7915422a24b5" containerName="registry-server" probeResult="failure" output=< Nov 21 14:24:09 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 14:24:09 crc kubenswrapper[5133]: > Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.299685 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-m8sxg"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.310473 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-m8sxg"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.330591 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.338749 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.345161 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.350941 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.356840 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.365582 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.373166 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-4hk46"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.380414 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-d8prn"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.387538 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-btw78"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.395233 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.400529 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-tjgmw"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.405692 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.411134 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-7hj9b"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.416070 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-c2jrr"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.421043 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-6rmbg"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.425882 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.430636 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6d2rx"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.444935 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-6rjkp"] Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.470837 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c99cd14-85e0-4b44-ace2-8a624ff98e60" path="/var/lib/kubelet/pods/0c99cd14-85e0-4b44-ace2-8a624ff98e60/volumes" Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.471540 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39394758-b0aa-434c-b296-7c23eac0abee" path="/var/lib/kubelet/pods/39394758-b0aa-434c-b296-7c23eac0abee/volumes" Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.472200 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50083ce9-d8d7-40f9-aceb-4fd801670062" path="/var/lib/kubelet/pods/50083ce9-d8d7-40f9-aceb-4fd801670062/volumes" Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.472799 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d1a1428-a43a-494b-9478-92dba073c524" path="/var/lib/kubelet/pods/7d1a1428-a43a-494b-9478-92dba073c524/volumes" Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.473773 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8300643f-e199-4e87-bee2-0d2e79fdf798" path="/var/lib/kubelet/pods/8300643f-e199-4e87-bee2-0d2e79fdf798/volumes" Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.474433 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a00d8823-6a3e-4348-aefc-e0101509ad83" path="/var/lib/kubelet/pods/a00d8823-6a3e-4348-aefc-e0101509ad83/volumes" Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.474956 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2eb306a-ed7a-4481-ae0f-5dd3c3046d81" path="/var/lib/kubelet/pods/a2eb306a-ed7a-4481-ae0f-5dd3c3046d81/volumes" Nov 21 14:24:12 crc 
kubenswrapper[5133]: I1121 14:24:12.475897 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a541160c-9971-4969-9296-23972aaf6bbf" path="/var/lib/kubelet/pods/a541160c-9971-4969-9296-23972aaf6bbf/volumes" Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.476398 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb8ce05b-7db5-4f97-8f82-93a7f9c00c83" path="/var/lib/kubelet/pods/cb8ce05b-7db5-4f97-8f82-93a7f9c00c83/volumes" Nov 21 14:24:12 crc kubenswrapper[5133]: I1121 14:24:12.476927 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8455706-f0ef-47cc-8b9f-48390f4b4fa4" path="/var/lib/kubelet/pods/e8455706-f0ef-47cc-8b9f-48390f4b4fa4/volumes" Nov 21 14:24:14 crc kubenswrapper[5133]: I1121 14:24:14.458775 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:24:14 crc kubenswrapper[5133]: E1121 14:24:14.459601 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.055547 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.104648 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.138151 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5"] Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.139909 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.142797 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.142897 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.142977 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.143126 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.143244 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.146684 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5"] Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.196630 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.196729 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.196785 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.196819 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.196857 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krw6l\" (UniqueName: \"kubernetes.io/projected/d8943a9b-ac1b-4f72-86d3-01138d4223e1-kube-api-access-krw6l\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.290468 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-9w6lg"] Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.297432 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.297492 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.297530 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.297569 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krw6l\" (UniqueName: \"kubernetes.io/projected/d8943a9b-ac1b-4f72-86d3-01138d4223e1-kube-api-access-krw6l\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.297589 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.303610 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.303816 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ceph\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.303914 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 
14:24:18.305160 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.315383 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krw6l\" (UniqueName: \"kubernetes.io/projected/d8943a9b-ac1b-4f72-86d3-01138d4223e1-kube-api-access-krw6l\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:18 crc kubenswrapper[5133]: I1121 14:24:18.459824 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:19 crc kubenswrapper[5133]: I1121 14:24:19.026670 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5"] Nov 21 14:24:19 crc kubenswrapper[5133]: W1121 14:24:19.037378 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8943a9b_ac1b_4f72_86d3_01138d4223e1.slice/crio-a55fc4991cacc230dd2585d9c3090b6687c40e44cc3d3dc07e3a7806cc852d77 WatchSource:0}: Error finding container a55fc4991cacc230dd2585d9c3090b6687c40e44cc3d3dc07e3a7806cc852d77: Status 404 returned error can't find the container with id a55fc4991cacc230dd2585d9c3090b6687c40e44cc3d3dc07e3a7806cc852d77 Nov 21 14:24:19 crc kubenswrapper[5133]: I1121 14:24:19.344774 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" event={"ID":"d8943a9b-ac1b-4f72-86d3-01138d4223e1","Type":"ContainerStarted","Data":"a55fc4991cacc230dd2585d9c3090b6687c40e44cc3d3dc07e3a7806cc852d77"} Nov 21 14:24:19 crc kubenswrapper[5133]: I1121 14:24:19.344892 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9w6lg" podUID="a1149747-3645-4424-860e-7915422a24b5" containerName="registry-server" containerID="cri-o://afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458" gracePeriod=2 Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.218595 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.235295 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thqf2\" (UniqueName: \"kubernetes.io/projected/a1149747-3645-4424-860e-7915422a24b5-kube-api-access-thqf2\") pod \"a1149747-3645-4424-860e-7915422a24b5\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.235360 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-utilities\") pod \"a1149747-3645-4424-860e-7915422a24b5\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.235382 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-catalog-content\") pod \"a1149747-3645-4424-860e-7915422a24b5\" (UID: \"a1149747-3645-4424-860e-7915422a24b5\") " Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.239363 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-utilities" (OuterVolumeSpecName: "utilities") pod "a1149747-3645-4424-860e-7915422a24b5" (UID: "a1149747-3645-4424-860e-7915422a24b5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.241918 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1149747-3645-4424-860e-7915422a24b5-kube-api-access-thqf2" (OuterVolumeSpecName: "kube-api-access-thqf2") pod "a1149747-3645-4424-860e-7915422a24b5" (UID: "a1149747-3645-4424-860e-7915422a24b5"). InnerVolumeSpecName "kube-api-access-thqf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.338138 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a1149747-3645-4424-860e-7915422a24b5" (UID: "a1149747-3645-4424-860e-7915422a24b5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.339347 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thqf2\" (UniqueName: \"kubernetes.io/projected/a1149747-3645-4424-860e-7915422a24b5-kube-api-access-thqf2\") on node \"crc\" DevicePath \"\"" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.339369 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.339378 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1149747-3645-4424-860e-7915422a24b5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.361648 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" event={"ID":"d8943a9b-ac1b-4f72-86d3-01138d4223e1","Type":"ContainerStarted","Data":"980f09f751feba36f7ec80292d8a9c9326543850f268069457419c8f9e17cb04"} Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.364164 5133 generic.go:334] "Generic (PLEG): container finished" podID="a1149747-3645-4424-860e-7915422a24b5" containerID="afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458" exitCode=0 Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.364193 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9w6lg" event={"ID":"a1149747-3645-4424-860e-7915422a24b5","Type":"ContainerDied","Data":"afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458"} Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.364223 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9w6lg" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.364242 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9w6lg" event={"ID":"a1149747-3645-4424-860e-7915422a24b5","Type":"ContainerDied","Data":"cfc68d7b3558a2d202038328ee96dc11f3f090872917aacbf875ab7d95267e00"} Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.364276 5133 scope.go:117] "RemoveContainer" containerID="afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.383274 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" podStartSLOduration=1.6097486189999999 podStartE2EDuration="2.383192992s" podCreationTimestamp="2025-11-21 14:24:18 +0000 UTC" firstStartedPulling="2025-11-21 14:24:19.040171967 +0000 UTC m=+2518.838004215" lastFinishedPulling="2025-11-21 14:24:19.8136163 +0000 UTC m=+2519.611448588" observedRunningTime="2025-11-21 14:24:20.376977686 +0000 UTC m=+2520.174809944" watchObservedRunningTime="2025-11-21 14:24:20.383192992 +0000 UTC m=+2520.181025260" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.386468 5133 scope.go:117] "RemoveContainer" containerID="2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.403503 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9w6lg"] Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.411521 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9w6lg"] Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.437695 5133 scope.go:117] "RemoveContainer" containerID="e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.458732 5133 scope.go:117] "RemoveContainer" containerID="afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458" Nov 21 14:24:20 crc kubenswrapper[5133]: E1121 14:24:20.460382 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458\": container with ID starting with afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458 not found: ID does not exist" containerID="afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.460429 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458"} err="failed to get container status \"afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458\": rpc error: code = NotFound desc = could not find container \"afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458\": container with ID starting with afcf836ad5e9801d307998afd7b16b5a8b4b8ec92e3e6998aa5a052a722bd458 not found: ID does not exist" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.460462 5133 scope.go:117] "RemoveContainer" containerID="2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757" Nov 21 14:24:20 crc kubenswrapper[5133]: E1121 14:24:20.461367 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757\": container with ID starting with 2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757 not found: ID does not exist" containerID="2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.461406 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757"} err="failed to get container status \"2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757\": rpc error: code = NotFound desc = could not find container \"2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757\": container with ID starting with 2118fa6b00dc5a8011600c895b0b4373d0ecf73f0b8f4abfbe1e24293705c757 not found: ID does not exist" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.461430 5133 scope.go:117] "RemoveContainer" containerID="e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b" Nov 21 14:24:20 crc kubenswrapper[5133]: E1121 14:24:20.469839 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b\": container with ID starting with e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b not found: ID does not exist" containerID="e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.469897 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b"} err="failed to get container status \"e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b\": rpc error: code = NotFound desc = could not find container \"e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b\": container with ID starting with e0b00150fd93b0f054d792892d001cd14c1b5bf1509ebfdf48d3793285b9077b not found: ID does not exist" Nov 21 14:24:20 crc kubenswrapper[5133]: I1121 14:24:20.491720 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1149747-3645-4424-860e-7915422a24b5" path="/var/lib/kubelet/pods/a1149747-3645-4424-860e-7915422a24b5/volumes" Nov 21 14:24:25 crc kubenswrapper[5133]: I1121 14:24:25.457979 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:24:25 crc kubenswrapper[5133]: E1121 14:24:25.459210 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:24:34 crc kubenswrapper[5133]: I1121 14:24:34.523743 5133 generic.go:334] "Generic (PLEG): container finished" podID="d8943a9b-ac1b-4f72-86d3-01138d4223e1" containerID="980f09f751feba36f7ec80292d8a9c9326543850f268069457419c8f9e17cb04" exitCode=0 Nov 21 14:24:34 crc kubenswrapper[5133]: I1121 14:24:34.524515 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" 
event={"ID":"d8943a9b-ac1b-4f72-86d3-01138d4223e1","Type":"ContainerDied","Data":"980f09f751feba36f7ec80292d8a9c9326543850f268069457419c8f9e17cb04"} Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.016668 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.075086 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krw6l\" (UniqueName: \"kubernetes.io/projected/d8943a9b-ac1b-4f72-86d3-01138d4223e1-kube-api-access-krw6l\") pod \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.075231 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ceph\") pod \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.075497 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ssh-key\") pod \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.075575 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-inventory\") pod \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.075668 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-repo-setup-combined-ca-bundle\") pod \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\" (UID: \"d8943a9b-ac1b-4f72-86d3-01138d4223e1\") " Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.084287 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8943a9b-ac1b-4f72-86d3-01138d4223e1-kube-api-access-krw6l" (OuterVolumeSpecName: "kube-api-access-krw6l") pod "d8943a9b-ac1b-4f72-86d3-01138d4223e1" (UID: "d8943a9b-ac1b-4f72-86d3-01138d4223e1"). InnerVolumeSpecName "kube-api-access-krw6l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.085855 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ceph" (OuterVolumeSpecName: "ceph") pod "d8943a9b-ac1b-4f72-86d3-01138d4223e1" (UID: "d8943a9b-ac1b-4f72-86d3-01138d4223e1"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.088753 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "d8943a9b-ac1b-4f72-86d3-01138d4223e1" (UID: "d8943a9b-ac1b-4f72-86d3-01138d4223e1"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.106098 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-inventory" (OuterVolumeSpecName: "inventory") pod "d8943a9b-ac1b-4f72-86d3-01138d4223e1" (UID: "d8943a9b-ac1b-4f72-86d3-01138d4223e1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.113596 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d8943a9b-ac1b-4f72-86d3-01138d4223e1" (UID: "d8943a9b-ac1b-4f72-86d3-01138d4223e1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.177436 5133 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.177499 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krw6l\" (UniqueName: \"kubernetes.io/projected/d8943a9b-ac1b-4f72-86d3-01138d4223e1-kube-api-access-krw6l\") on node \"crc\" DevicePath \"\"" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.177526 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.177543 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.177564 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8943a9b-ac1b-4f72-86d3-01138d4223e1-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.542135 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" event={"ID":"d8943a9b-ac1b-4f72-86d3-01138d4223e1","Type":"ContainerDied","Data":"a55fc4991cacc230dd2585d9c3090b6687c40e44cc3d3dc07e3a7806cc852d77"} Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.542166 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.542194 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a55fc4991cacc230dd2585d9c3090b6687c40e44cc3d3dc07e3a7806cc852d77" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.634380 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz"] Nov 21 14:24:36 crc kubenswrapper[5133]: E1121 14:24:36.635190 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1149747-3645-4424-860e-7915422a24b5" containerName="extract-content" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.635213 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1149747-3645-4424-860e-7915422a24b5" containerName="extract-content" Nov 21 14:24:36 crc kubenswrapper[5133]: E1121 14:24:36.635230 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8943a9b-ac1b-4f72-86d3-01138d4223e1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.635240 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8943a9b-ac1b-4f72-86d3-01138d4223e1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 14:24:36 crc kubenswrapper[5133]: E1121 14:24:36.635268 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1149747-3645-4424-860e-7915422a24b5" containerName="extract-utilities" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.635277 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1149747-3645-4424-860e-7915422a24b5" containerName="extract-utilities" Nov 21 14:24:36 crc kubenswrapper[5133]: E1121 14:24:36.635298 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1149747-3645-4424-860e-7915422a24b5" containerName="registry-server" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.635307 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1149747-3645-4424-860e-7915422a24b5" containerName="registry-server" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.635512 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1149747-3645-4424-860e-7915422a24b5" containerName="registry-server" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.635540 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8943a9b-ac1b-4f72-86d3-01138d4223e1" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.636306 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.641492 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.641576 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.641749 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.647188 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.650693 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz"] Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.650867 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.801850 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.801930 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.802585 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.802709 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2pxt\" (UniqueName: \"kubernetes.io/projected/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-kube-api-access-h2pxt\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.802737 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.904505 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-h2pxt\" (UniqueName: \"kubernetes.io/projected/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-kube-api-access-h2pxt\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.904547 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.904774 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.904813 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.904860 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.913675 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ceph\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.913854 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.914435 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.915601 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-inventory\") pod 
\"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:36 crc kubenswrapper[5133]: I1121 14:24:36.925910 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2pxt\" (UniqueName: \"kubernetes.io/projected/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-kube-api-access-h2pxt\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.012316 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.406497 5133 scope.go:117] "RemoveContainer" containerID="0a4bbd90d94eb25cb896ecd2edd0ab360894c487cac140d31208295287d3dce4" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.413213 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz"] Nov 21 14:24:37 crc kubenswrapper[5133]: W1121 14:24:37.418447 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbcf3ecf2_fb8c_424f_a9e8_600a16795bed.slice/crio-7c331000f5f3a11a781ab1c49e82b8bae5dfb4e0dbbd207c9d43ccd8ae1f059b WatchSource:0}: Error finding container 7c331000f5f3a11a781ab1c49e82b8bae5dfb4e0dbbd207c9d43ccd8ae1f059b: Status 404 returned error can't find the container with id 7c331000f5f3a11a781ab1c49e82b8bae5dfb4e0dbbd207c9d43ccd8ae1f059b Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.487650 5133 scope.go:117] "RemoveContainer" containerID="c5eb8cff7df0169eda8c94fb4dd988a2c53d2c5325725035badd2cf7b1d644c5" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.531172 5133 scope.go:117] "RemoveContainer" containerID="3eb41b1518dedfbb91f03a54a2e535c4e09ce3ecb0746940b9bbfa7e1811073e" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.556297 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" event={"ID":"bcf3ecf2-fb8c-424f-a9e8-600a16795bed","Type":"ContainerStarted","Data":"7c331000f5f3a11a781ab1c49e82b8bae5dfb4e0dbbd207c9d43ccd8ae1f059b"} Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.567958 5133 scope.go:117] "RemoveContainer" containerID="6b3405b74425624cff63d2e4b9590b860c945b68b04c30d869af9b4c1098c58a" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.610991 5133 scope.go:117] "RemoveContainer" containerID="859852c198f5f3f6fba179cf2d1f27465aaa13222a592f190933741620fbc7f6" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.642977 5133 scope.go:117] "RemoveContainer" containerID="64d4679b63c273b6c23751780d89e8e0b0c2d539414f1f7b6d6e4885141cfe6e" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.683621 5133 scope.go:117] "RemoveContainer" containerID="fcfec1202350fee4a0f4eb21322dde6f71001844f038b4bcad89fdf33cc1798d" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.730499 5133 scope.go:117] "RemoveContainer" containerID="cbd66e4f5fe271e3ad76ab9c4422dba23228103a77ba7e97e882f314be1db169" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 14:24:37.801610 5133 scope.go:117] "RemoveContainer" containerID="32f7eb89303faa09234527ee8fd83a587998293788060d8ec123d23e54a1c964" Nov 21 14:24:37 crc kubenswrapper[5133]: I1121 
14:24:37.866519 5133 scope.go:117] "RemoveContainer" containerID="4e067bb5548a20b221329767cdc1354ceeb3307ef6d4f13dc58ad5fc7a7aa4c3" Nov 21 14:24:38 crc kubenswrapper[5133]: I1121 14:24:38.459701 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:24:38 crc kubenswrapper[5133]: E1121 14:24:38.460358 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:24:39 crc kubenswrapper[5133]: I1121 14:24:39.573215 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" event={"ID":"bcf3ecf2-fb8c-424f-a9e8-600a16795bed","Type":"ContainerStarted","Data":"be9541cc0a0e3fa77ed6c5ecb233e2df268e56ce683c37fe1a00ad72e23531cf"} Nov 21 14:24:39 crc kubenswrapper[5133]: I1121 14:24:39.605578 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" podStartSLOduration=2.6590773370000003 podStartE2EDuration="3.605551209s" podCreationTimestamp="2025-11-21 14:24:36 +0000 UTC" firstStartedPulling="2025-11-21 14:24:37.421628229 +0000 UTC m=+2537.219460517" lastFinishedPulling="2025-11-21 14:24:38.368102141 +0000 UTC m=+2538.165934389" observedRunningTime="2025-11-21 14:24:39.59427284 +0000 UTC m=+2539.392105098" watchObservedRunningTime="2025-11-21 14:24:39.605551209 +0000 UTC m=+2539.403383497" Nov 21 14:24:51 crc kubenswrapper[5133]: I1121 14:24:51.458230 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:24:51 crc kubenswrapper[5133]: E1121 14:24:51.459244 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:25:06 crc kubenswrapper[5133]: I1121 14:25:06.457802 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:25:06 crc kubenswrapper[5133]: E1121 14:25:06.458689 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:25:19 crc kubenswrapper[5133]: I1121 14:25:19.458106 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:25:19 crc kubenswrapper[5133]: E1121 14:25:19.459420 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:25:31 crc kubenswrapper[5133]: I1121 14:25:31.458723 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:25:31 crc kubenswrapper[5133]: E1121 14:25:31.460530 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.491704 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-stgnv"] Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.509537 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.515795 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-stgnv"] Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.640127 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-utilities\") pod \"community-operators-stgnv\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.640266 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78th6\" (UniqueName: \"kubernetes.io/projected/7b68b1ed-70dc-4f97-884d-473b882b10c8-kube-api-access-78th6\") pod \"community-operators-stgnv\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.640359 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-catalog-content\") pod \"community-operators-stgnv\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.742472 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78th6\" (UniqueName: \"kubernetes.io/projected/7b68b1ed-70dc-4f97-884d-473b882b10c8-kube-api-access-78th6\") pod \"community-operators-stgnv\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.742758 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-catalog-content\") pod \"community-operators-stgnv\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc 
kubenswrapper[5133]: I1121 14:25:42.742986 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-utilities\") pod \"community-operators-stgnv\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.743459 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-catalog-content\") pod \"community-operators-stgnv\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.743620 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-utilities\") pod \"community-operators-stgnv\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.771914 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78th6\" (UniqueName: \"kubernetes.io/projected/7b68b1ed-70dc-4f97-884d-473b882b10c8-kube-api-access-78th6\") pod \"community-operators-stgnv\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:42 crc kubenswrapper[5133]: I1121 14:25:42.841165 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:43 crc kubenswrapper[5133]: I1121 14:25:43.414352 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-stgnv"] Nov 21 14:25:44 crc kubenswrapper[5133]: I1121 14:25:44.170786 5133 generic.go:334] "Generic (PLEG): container finished" podID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerID="65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e" exitCode=0 Nov 21 14:25:44 crc kubenswrapper[5133]: I1121 14:25:44.171074 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-stgnv" event={"ID":"7b68b1ed-70dc-4f97-884d-473b882b10c8","Type":"ContainerDied","Data":"65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e"} Nov 21 14:25:44 crc kubenswrapper[5133]: I1121 14:25:44.171220 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-stgnv" event={"ID":"7b68b1ed-70dc-4f97-884d-473b882b10c8","Type":"ContainerStarted","Data":"a2481fa9a4b07795709ac97882c023edf7c90b2344835d99fb5cef885547af12"} Nov 21 14:25:44 crc kubenswrapper[5133]: I1121 14:25:44.457944 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:25:44 crc kubenswrapper[5133]: E1121 14:25:44.458724 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:25:46 crc kubenswrapper[5133]: I1121 14:25:46.198776 5133 generic.go:334] 
"Generic (PLEG): container finished" podID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerID="bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1" exitCode=0 Nov 21 14:25:46 crc kubenswrapper[5133]: I1121 14:25:46.199729 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-stgnv" event={"ID":"7b68b1ed-70dc-4f97-884d-473b882b10c8","Type":"ContainerDied","Data":"bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1"} Nov 21 14:25:47 crc kubenswrapper[5133]: I1121 14:25:47.211450 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-stgnv" event={"ID":"7b68b1ed-70dc-4f97-884d-473b882b10c8","Type":"ContainerStarted","Data":"93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db"} Nov 21 14:25:47 crc kubenswrapper[5133]: I1121 14:25:47.237480 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-stgnv" podStartSLOduration=2.701356279 podStartE2EDuration="5.237455452s" podCreationTimestamp="2025-11-21 14:25:42 +0000 UTC" firstStartedPulling="2025-11-21 14:25:44.17555785 +0000 UTC m=+2603.973390088" lastFinishedPulling="2025-11-21 14:25:46.711657003 +0000 UTC m=+2606.509489261" observedRunningTime="2025-11-21 14:25:47.230826006 +0000 UTC m=+2607.028658254" watchObservedRunningTime="2025-11-21 14:25:47.237455452 +0000 UTC m=+2607.035287710" Nov 21 14:25:52 crc kubenswrapper[5133]: I1121 14:25:52.842167 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:52 crc kubenswrapper[5133]: I1121 14:25:52.842805 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:52 crc kubenswrapper[5133]: I1121 14:25:52.928478 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:53 crc kubenswrapper[5133]: I1121 14:25:53.363402 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:53 crc kubenswrapper[5133]: I1121 14:25:53.440434 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-stgnv"] Nov 21 14:25:55 crc kubenswrapper[5133]: I1121 14:25:55.305413 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-stgnv" podUID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerName="registry-server" containerID="cri-o://93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db" gracePeriod=2 Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.287752 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.320270 5133 generic.go:334] "Generic (PLEG): container finished" podID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerID="93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db" exitCode=0 Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.320309 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-stgnv" event={"ID":"7b68b1ed-70dc-4f97-884d-473b882b10c8","Type":"ContainerDied","Data":"93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db"} Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.320350 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-stgnv" event={"ID":"7b68b1ed-70dc-4f97-884d-473b882b10c8","Type":"ContainerDied","Data":"a2481fa9a4b07795709ac97882c023edf7c90b2344835d99fb5cef885547af12"} Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.321666 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-stgnv" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.322396 5133 scope.go:117] "RemoveContainer" containerID="93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.362535 5133 scope.go:117] "RemoveContainer" containerID="bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.397945 5133 scope.go:117] "RemoveContainer" containerID="65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.427315 5133 scope.go:117] "RemoveContainer" containerID="93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db" Nov 21 14:25:56 crc kubenswrapper[5133]: E1121 14:25:56.427868 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db\": container with ID starting with 93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db not found: ID does not exist" containerID="93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.427933 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db"} err="failed to get container status \"93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db\": rpc error: code = NotFound desc = could not find container \"93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db\": container with ID starting with 93111043986e03a164987b97b0ed12a2738862a28b529d3250803ce1d56799db not found: ID does not exist" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.428067 5133 scope.go:117] "RemoveContainer" containerID="bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1" Nov 21 14:25:56 crc kubenswrapper[5133]: E1121 14:25:56.428526 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1\": container with ID starting with bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1 not found: ID does not exist" 
containerID="bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.428592 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1"} err="failed to get container status \"bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1\": rpc error: code = NotFound desc = could not find container \"bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1\": container with ID starting with bfdb281e25eba6155140162939919cf80688310246c0341d074a0793c36c27d1 not found: ID does not exist" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.428638 5133 scope.go:117] "RemoveContainer" containerID="65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e" Nov 21 14:25:56 crc kubenswrapper[5133]: E1121 14:25:56.429243 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e\": container with ID starting with 65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e not found: ID does not exist" containerID="65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.429282 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e"} err="failed to get container status \"65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e\": rpc error: code = NotFound desc = could not find container \"65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e\": container with ID starting with 65ef6dbf0d7d77007af291e6f7c999385badb6139998a7b43774c1e465cf5a8e not found: ID does not exist" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.440043 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-catalog-content\") pod \"7b68b1ed-70dc-4f97-884d-473b882b10c8\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.440130 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-utilities\") pod \"7b68b1ed-70dc-4f97-884d-473b882b10c8\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.440302 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78th6\" (UniqueName: \"kubernetes.io/projected/7b68b1ed-70dc-4f97-884d-473b882b10c8-kube-api-access-78th6\") pod \"7b68b1ed-70dc-4f97-884d-473b882b10c8\" (UID: \"7b68b1ed-70dc-4f97-884d-473b882b10c8\") " Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.440813 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-utilities" (OuterVolumeSpecName: "utilities") pod "7b68b1ed-70dc-4f97-884d-473b882b10c8" (UID: "7b68b1ed-70dc-4f97-884d-473b882b10c8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.446121 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b68b1ed-70dc-4f97-884d-473b882b10c8-kube-api-access-78th6" (OuterVolumeSpecName: "kube-api-access-78th6") pod "7b68b1ed-70dc-4f97-884d-473b882b10c8" (UID: "7b68b1ed-70dc-4f97-884d-473b882b10c8"). InnerVolumeSpecName "kube-api-access-78th6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.542091 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:25:56 crc kubenswrapper[5133]: I1121 14:25:56.542121 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78th6\" (UniqueName: \"kubernetes.io/projected/7b68b1ed-70dc-4f97-884d-473b882b10c8-kube-api-access-78th6\") on node \"crc\" DevicePath \"\"" Nov 21 14:25:57 crc kubenswrapper[5133]: I1121 14:25:57.166071 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7b68b1ed-70dc-4f97-884d-473b882b10c8" (UID: "7b68b1ed-70dc-4f97-884d-473b882b10c8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:25:57 crc kubenswrapper[5133]: I1121 14:25:57.258155 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b68b1ed-70dc-4f97-884d-473b882b10c8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:25:57 crc kubenswrapper[5133]: I1121 14:25:57.282363 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-stgnv"] Nov 21 14:25:57 crc kubenswrapper[5133]: I1121 14:25:57.295275 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-stgnv"] Nov 21 14:25:58 crc kubenswrapper[5133]: I1121 14:25:58.458067 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:25:58 crc kubenswrapper[5133]: E1121 14:25:58.458356 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:25:58 crc kubenswrapper[5133]: I1121 14:25:58.489580 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b68b1ed-70dc-4f97-884d-473b882b10c8" path="/var/lib/kubelet/pods/7b68b1ed-70dc-4f97-884d-473b882b10c8/volumes" Nov 21 14:26:12 crc kubenswrapper[5133]: I1121 14:26:12.462524 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:26:12 crc kubenswrapper[5133]: E1121 14:26:12.463531 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:26:25 crc kubenswrapper[5133]: I1121 14:26:25.457966 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:26:26 crc kubenswrapper[5133]: I1121 14:26:26.608653 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"1f1186518ac0102818dd70469c0c9f88a271d9007ab1854097e5d4f6bcfd7ea2"} Nov 21 14:26:36 crc kubenswrapper[5133]: I1121 14:26:36.709413 5133 generic.go:334] "Generic (PLEG): container finished" podID="bcf3ecf2-fb8c-424f-a9e8-600a16795bed" containerID="be9541cc0a0e3fa77ed6c5ecb233e2df268e56ce683c37fe1a00ad72e23531cf" exitCode=0 Nov 21 14:26:36 crc kubenswrapper[5133]: I1121 14:26:36.709531 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" event={"ID":"bcf3ecf2-fb8c-424f-a9e8-600a16795bed","Type":"ContainerDied","Data":"be9541cc0a0e3fa77ed6c5ecb233e2df268e56ce683c37fe1a00ad72e23531cf"} Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.142281 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.306629 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-inventory\") pod \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.306786 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ssh-key\") pod \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.306839 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ceph\") pod \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.306907 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-bootstrap-combined-ca-bundle\") pod \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.306973 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2pxt\" (UniqueName: \"kubernetes.io/projected/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-kube-api-access-h2pxt\") pod \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\" (UID: \"bcf3ecf2-fb8c-424f-a9e8-600a16795bed\") " Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.312614 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-kube-api-access-h2pxt" (OuterVolumeSpecName: 
"kube-api-access-h2pxt") pod "bcf3ecf2-fb8c-424f-a9e8-600a16795bed" (UID: "bcf3ecf2-fb8c-424f-a9e8-600a16795bed"). InnerVolumeSpecName "kube-api-access-h2pxt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.312724 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "bcf3ecf2-fb8c-424f-a9e8-600a16795bed" (UID: "bcf3ecf2-fb8c-424f-a9e8-600a16795bed"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.313313 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ceph" (OuterVolumeSpecName: "ceph") pod "bcf3ecf2-fb8c-424f-a9e8-600a16795bed" (UID: "bcf3ecf2-fb8c-424f-a9e8-600a16795bed"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.339497 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bcf3ecf2-fb8c-424f-a9e8-600a16795bed" (UID: "bcf3ecf2-fb8c-424f-a9e8-600a16795bed"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.341946 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-inventory" (OuterVolumeSpecName: "inventory") pod "bcf3ecf2-fb8c-424f-a9e8-600a16795bed" (UID: "bcf3ecf2-fb8c-424f-a9e8-600a16795bed"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.409662 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.409706 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.409733 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.409757 5133 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.409779 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2pxt\" (UniqueName: \"kubernetes.io/projected/bcf3ecf2-fb8c-424f-a9e8-600a16795bed-kube-api-access-h2pxt\") on node \"crc\" DevicePath \"\"" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.783709 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" event={"ID":"bcf3ecf2-fb8c-424f-a9e8-600a16795bed","Type":"ContainerDied","Data":"7c331000f5f3a11a781ab1c49e82b8bae5dfb4e0dbbd207c9d43ccd8ae1f059b"} Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.783752 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c331000f5f3a11a781ab1c49e82b8bae5dfb4e0dbbd207c9d43ccd8ae1f059b" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.783833 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.853228 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82"] Nov 21 14:26:38 crc kubenswrapper[5133]: E1121 14:26:38.853653 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerName="extract-utilities" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.853674 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerName="extract-utilities" Nov 21 14:26:38 crc kubenswrapper[5133]: E1121 14:26:38.853700 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerName="extract-content" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.853709 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerName="extract-content" Nov 21 14:26:38 crc kubenswrapper[5133]: E1121 14:26:38.853730 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerName="registry-server" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.853740 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerName="registry-server" Nov 21 14:26:38 crc kubenswrapper[5133]: E1121 14:26:38.853755 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcf3ecf2-fb8c-424f-a9e8-600a16795bed" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.853765 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcf3ecf2-fb8c-424f-a9e8-600a16795bed" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.853960 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b68b1ed-70dc-4f97-884d-473b882b10c8" containerName="registry-server" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.853988 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcf3ecf2-fb8c-424f-a9e8-600a16795bed" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.854666 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.856943 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.857304 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.857459 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.859861 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.861066 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.864058 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82"] Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.918587 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.919075 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.919126 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w882x\" (UniqueName: \"kubernetes.io/projected/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-kube-api-access-w882x\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:38 crc kubenswrapper[5133]: I1121 14:26:38.919175 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.020681 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.020753 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.020785 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w882x\" (UniqueName: \"kubernetes.io/projected/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-kube-api-access-w882x\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.020822 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.025581 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ceph\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.026119 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.035759 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.040266 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w882x\" (UniqueName: \"kubernetes.io/projected/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-kube-api-access-w882x\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-f4k82\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.176169 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.713173 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82"] Nov 21 14:26:39 crc kubenswrapper[5133]: I1121 14:26:39.791343 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" event={"ID":"b94efc2b-b41e-4cbe-9f19-a89bdc99630c","Type":"ContainerStarted","Data":"ec0a19ea7badc73f5d6f15eb4227bc63166e41d5a8337e5800f1a982d7d0e256"} Nov 21 14:26:40 crc kubenswrapper[5133]: I1121 14:26:40.803681 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" event={"ID":"b94efc2b-b41e-4cbe-9f19-a89bdc99630c","Type":"ContainerStarted","Data":"70ea8907051792fcc53c70e4bcedf483d39c58c4a1b676f52225e3dbf99b55e6"} Nov 21 14:26:40 crc kubenswrapper[5133]: I1121 14:26:40.825191 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" podStartSLOduration=2.325070859 podStartE2EDuration="2.825171693s" podCreationTimestamp="2025-11-21 14:26:38 +0000 UTC" firstStartedPulling="2025-11-21 14:26:39.717450764 +0000 UTC m=+2659.515283022" lastFinishedPulling="2025-11-21 14:26:40.217551568 +0000 UTC m=+2660.015383856" observedRunningTime="2025-11-21 14:26:40.821845884 +0000 UTC m=+2660.619678142" watchObservedRunningTime="2025-11-21 14:26:40.825171693 +0000 UTC m=+2660.623003951" Nov 21 14:27:07 crc kubenswrapper[5133]: I1121 14:27:07.108024 5133 generic.go:334] "Generic (PLEG): container finished" podID="b94efc2b-b41e-4cbe-9f19-a89bdc99630c" containerID="70ea8907051792fcc53c70e4bcedf483d39c58c4a1b676f52225e3dbf99b55e6" exitCode=0 Nov 21 14:27:07 crc kubenswrapper[5133]: I1121 14:27:07.108243 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" event={"ID":"b94efc2b-b41e-4cbe-9f19-a89bdc99630c","Type":"ContainerDied","Data":"70ea8907051792fcc53c70e4bcedf483d39c58c4a1b676f52225e3dbf99b55e6"} Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.567985 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.757341 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ceph\") pod \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.757400 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ssh-key\") pod \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.757441 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w882x\" (UniqueName: \"kubernetes.io/projected/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-kube-api-access-w882x\") pod \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.757467 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-inventory\") pod \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\" (UID: \"b94efc2b-b41e-4cbe-9f19-a89bdc99630c\") " Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.763203 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-kube-api-access-w882x" (OuterVolumeSpecName: "kube-api-access-w882x") pod "b94efc2b-b41e-4cbe-9f19-a89bdc99630c" (UID: "b94efc2b-b41e-4cbe-9f19-a89bdc99630c"). InnerVolumeSpecName "kube-api-access-w882x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.763553 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ceph" (OuterVolumeSpecName: "ceph") pod "b94efc2b-b41e-4cbe-9f19-a89bdc99630c" (UID: "b94efc2b-b41e-4cbe-9f19-a89bdc99630c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.786753 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-inventory" (OuterVolumeSpecName: "inventory") pod "b94efc2b-b41e-4cbe-9f19-a89bdc99630c" (UID: "b94efc2b-b41e-4cbe-9f19-a89bdc99630c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.800157 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b94efc2b-b41e-4cbe-9f19-a89bdc99630c" (UID: "b94efc2b-b41e-4cbe-9f19-a89bdc99630c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.859866 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w882x\" (UniqueName: \"kubernetes.io/projected/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-kube-api-access-w882x\") on node \"crc\" DevicePath \"\"" Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.859908 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.859916 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:27:08 crc kubenswrapper[5133]: I1121 14:27:08.859925 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b94efc2b-b41e-4cbe-9f19-a89bdc99630c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.129501 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" event={"ID":"b94efc2b-b41e-4cbe-9f19-a89bdc99630c","Type":"ContainerDied","Data":"ec0a19ea7badc73f5d6f15eb4227bc63166e41d5a8337e5800f1a982d7d0e256"} Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.129558 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec0a19ea7badc73f5d6f15eb4227bc63166e41d5a8337e5800f1a982d7d0e256" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.129613 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-f4k82" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.233470 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc"] Nov 21 14:27:09 crc kubenswrapper[5133]: E1121 14:27:09.234437 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b94efc2b-b41e-4cbe-9f19-a89bdc99630c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.234546 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b94efc2b-b41e-4cbe-9f19-a89bdc99630c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.234861 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b94efc2b-b41e-4cbe-9f19-a89bdc99630c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.235868 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.238715 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.239272 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.239533 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.239750 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.239947 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.248558 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc"] Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.373880 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bggwr\" (UniqueName: \"kubernetes.io/projected/45a6b2e7-10f7-44d7-8293-bc3c32758294-kube-api-access-bggwr\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.373956 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.374091 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.374150 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.475913 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.475977 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-bggwr\" (UniqueName: \"kubernetes.io/projected/45a6b2e7-10f7-44d7-8293-bc3c32758294-kube-api-access-bggwr\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.476063 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.476165 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.490176 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.490281 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.490427 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ceph\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.505537 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bggwr\" (UniqueName: \"kubernetes.io/projected/45a6b2e7-10f7-44d7-8293-bc3c32758294-kube-api-access-bggwr\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:09 crc kubenswrapper[5133]: I1121 14:27:09.559548 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:10 crc kubenswrapper[5133]: I1121 14:27:10.116772 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc"] Nov 21 14:27:10 crc kubenswrapper[5133]: I1121 14:27:10.137479 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" event={"ID":"45a6b2e7-10f7-44d7-8293-bc3c32758294","Type":"ContainerStarted","Data":"a07e58e9e8e172d0e7a6d92450203b2c10ae3197d96e993568951c35880fb5f8"} Nov 21 14:27:11 crc kubenswrapper[5133]: I1121 14:27:11.145658 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" event={"ID":"45a6b2e7-10f7-44d7-8293-bc3c32758294","Type":"ContainerStarted","Data":"220ce74ec5d07635312e0e4d47d27bbca74a9c35080869a826946d1c05193f9c"} Nov 21 14:27:11 crc kubenswrapper[5133]: I1121 14:27:11.162228 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" podStartSLOduration=1.720898197 podStartE2EDuration="2.16220028s" podCreationTimestamp="2025-11-21 14:27:09 +0000 UTC" firstStartedPulling="2025-11-21 14:27:10.124625685 +0000 UTC m=+2689.922457923" lastFinishedPulling="2025-11-21 14:27:10.565927758 +0000 UTC m=+2690.363760006" observedRunningTime="2025-11-21 14:27:11.15766163 +0000 UTC m=+2690.955493888" watchObservedRunningTime="2025-11-21 14:27:11.16220028 +0000 UTC m=+2690.960032538" Nov 21 14:27:16 crc kubenswrapper[5133]: I1121 14:27:16.197957 5133 generic.go:334] "Generic (PLEG): container finished" podID="45a6b2e7-10f7-44d7-8293-bc3c32758294" containerID="220ce74ec5d07635312e0e4d47d27bbca74a9c35080869a826946d1c05193f9c" exitCode=0 Nov 21 14:27:16 crc kubenswrapper[5133]: I1121 14:27:16.198156 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" event={"ID":"45a6b2e7-10f7-44d7-8293-bc3c32758294","Type":"ContainerDied","Data":"220ce74ec5d07635312e0e4d47d27bbca74a9c35080869a826946d1c05193f9c"} Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.715859 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.843188 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ssh-key\") pod \"45a6b2e7-10f7-44d7-8293-bc3c32758294\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.843296 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ceph\") pod \"45a6b2e7-10f7-44d7-8293-bc3c32758294\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.843355 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-inventory\") pod \"45a6b2e7-10f7-44d7-8293-bc3c32758294\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.843560 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bggwr\" (UniqueName: \"kubernetes.io/projected/45a6b2e7-10f7-44d7-8293-bc3c32758294-kube-api-access-bggwr\") pod \"45a6b2e7-10f7-44d7-8293-bc3c32758294\" (UID: \"45a6b2e7-10f7-44d7-8293-bc3c32758294\") " Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.859471 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45a6b2e7-10f7-44d7-8293-bc3c32758294-kube-api-access-bggwr" (OuterVolumeSpecName: "kube-api-access-bggwr") pod "45a6b2e7-10f7-44d7-8293-bc3c32758294" (UID: "45a6b2e7-10f7-44d7-8293-bc3c32758294"). InnerVolumeSpecName "kube-api-access-bggwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.859550 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ceph" (OuterVolumeSpecName: "ceph") pod "45a6b2e7-10f7-44d7-8293-bc3c32758294" (UID: "45a6b2e7-10f7-44d7-8293-bc3c32758294"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.893726 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-inventory" (OuterVolumeSpecName: "inventory") pod "45a6b2e7-10f7-44d7-8293-bc3c32758294" (UID: "45a6b2e7-10f7-44d7-8293-bc3c32758294"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.895485 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "45a6b2e7-10f7-44d7-8293-bc3c32758294" (UID: "45a6b2e7-10f7-44d7-8293-bc3c32758294"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.946665 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.946717 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bggwr\" (UniqueName: \"kubernetes.io/projected/45a6b2e7-10f7-44d7-8293-bc3c32758294-kube-api-access-bggwr\") on node \"crc\" DevicePath \"\"" Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.946736 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:27:17 crc kubenswrapper[5133]: I1121 14:27:17.946753 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/45a6b2e7-10f7-44d7-8293-bc3c32758294-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.228901 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" event={"ID":"45a6b2e7-10f7-44d7-8293-bc3c32758294","Type":"ContainerDied","Data":"a07e58e9e8e172d0e7a6d92450203b2c10ae3197d96e993568951c35880fb5f8"} Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.228963 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a07e58e9e8e172d0e7a6d92450203b2c10ae3197d96e993568951c35880fb5f8" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.229029 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.309510 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j"] Nov 21 14:27:18 crc kubenswrapper[5133]: E1121 14:27:18.309964 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45a6b2e7-10f7-44d7-8293-bc3c32758294" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.309990 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="45a6b2e7-10f7-44d7-8293-bc3c32758294" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.310265 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="45a6b2e7-10f7-44d7-8293-bc3c32758294" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.311032 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.313665 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.313888 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.314372 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.314572 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.314743 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.324623 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j"] Nov 21 14:27:18 crc kubenswrapper[5133]: E1121 14:27:18.348862 5133 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45a6b2e7_10f7_44d7_8293_bc3c32758294.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod45a6b2e7_10f7_44d7_8293_bc3c32758294.slice/crio-a07e58e9e8e172d0e7a6d92450203b2c10ae3197d96e993568951c35880fb5f8\": RecentStats: unable to find data in memory cache]" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.353699 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.353771 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.353944 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xbrp\" (UniqueName: \"kubernetes.io/projected/021cf83b-1e98-445e-a69b-7a6509718a2f-kube-api-access-8xbrp\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.354214 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc 
kubenswrapper[5133]: I1121 14:27:18.454906 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.457520 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.458291 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.458449 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xbrp\" (UniqueName: \"kubernetes.io/projected/021cf83b-1e98-445e-a69b-7a6509718a2f-kube-api-access-8xbrp\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.458575 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ceph\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.461043 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.462858 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.477978 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xbrp\" (UniqueName: \"kubernetes.io/projected/021cf83b-1e98-445e-a69b-7a6509718a2f-kube-api-access-8xbrp\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-s6c4j\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:18 crc kubenswrapper[5133]: I1121 14:27:18.638821 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:27:19 crc kubenswrapper[5133]: I1121 14:27:19.016990 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j"] Nov 21 14:27:19 crc kubenswrapper[5133]: I1121 14:27:19.240069 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" event={"ID":"021cf83b-1e98-445e-a69b-7a6509718a2f","Type":"ContainerStarted","Data":"ba16ee3bbbdec42c3eede95ec6877abc7bac90585c5c2aa56ac28f5486f49d30"} Nov 21 14:27:20 crc kubenswrapper[5133]: I1121 14:27:20.248915 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" event={"ID":"021cf83b-1e98-445e-a69b-7a6509718a2f","Type":"ContainerStarted","Data":"dc9c080a7b2c40537e863081dbd19f81315c34b2fd9f6578b6da0b70a65e8d02"} Nov 21 14:27:20 crc kubenswrapper[5133]: I1121 14:27:20.272710 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" podStartSLOduration=1.815084482 podStartE2EDuration="2.272692357s" podCreationTimestamp="2025-11-21 14:27:18 +0000 UTC" firstStartedPulling="2025-11-21 14:27:19.024936375 +0000 UTC m=+2698.822768623" lastFinishedPulling="2025-11-21 14:27:19.48254424 +0000 UTC m=+2699.280376498" observedRunningTime="2025-11-21 14:27:20.268179867 +0000 UTC m=+2700.066012125" watchObservedRunningTime="2025-11-21 14:27:20.272692357 +0000 UTC m=+2700.070524615" Nov 21 14:27:58 crc kubenswrapper[5133]: I1121 14:27:58.942033 5133 generic.go:334] "Generic (PLEG): container finished" podID="021cf83b-1e98-445e-a69b-7a6509718a2f" containerID="dc9c080a7b2c40537e863081dbd19f81315c34b2fd9f6578b6da0b70a65e8d02" exitCode=0 Nov 21 14:27:58 crc kubenswrapper[5133]: I1121 14:27:58.942098 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" event={"ID":"021cf83b-1e98-445e-a69b-7a6509718a2f","Type":"ContainerDied","Data":"dc9c080a7b2c40537e863081dbd19f81315c34b2fd9f6578b6da0b70a65e8d02"} Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.451427 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.594690 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ssh-key\") pod \"021cf83b-1e98-445e-a69b-7a6509718a2f\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.595547 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ceph\") pod \"021cf83b-1e98-445e-a69b-7a6509718a2f\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.595709 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-inventory\") pod \"021cf83b-1e98-445e-a69b-7a6509718a2f\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.595792 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xbrp\" (UniqueName: \"kubernetes.io/projected/021cf83b-1e98-445e-a69b-7a6509718a2f-kube-api-access-8xbrp\") pod \"021cf83b-1e98-445e-a69b-7a6509718a2f\" (UID: \"021cf83b-1e98-445e-a69b-7a6509718a2f\") " Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.601922 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ceph" (OuterVolumeSpecName: "ceph") pod "021cf83b-1e98-445e-a69b-7a6509718a2f" (UID: "021cf83b-1e98-445e-a69b-7a6509718a2f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.603934 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/021cf83b-1e98-445e-a69b-7a6509718a2f-kube-api-access-8xbrp" (OuterVolumeSpecName: "kube-api-access-8xbrp") pod "021cf83b-1e98-445e-a69b-7a6509718a2f" (UID: "021cf83b-1e98-445e-a69b-7a6509718a2f"). InnerVolumeSpecName "kube-api-access-8xbrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.633162 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-inventory" (OuterVolumeSpecName: "inventory") pod "021cf83b-1e98-445e-a69b-7a6509718a2f" (UID: "021cf83b-1e98-445e-a69b-7a6509718a2f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.634208 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "021cf83b-1e98-445e-a69b-7a6509718a2f" (UID: "021cf83b-1e98-445e-a69b-7a6509718a2f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.698283 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.698325 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xbrp\" (UniqueName: \"kubernetes.io/projected/021cf83b-1e98-445e-a69b-7a6509718a2f-kube-api-access-8xbrp\") on node \"crc\" DevicePath \"\"" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.698347 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.698362 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/021cf83b-1e98-445e-a69b-7a6509718a2f-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.969333 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" event={"ID":"021cf83b-1e98-445e-a69b-7a6509718a2f","Type":"ContainerDied","Data":"ba16ee3bbbdec42c3eede95ec6877abc7bac90585c5c2aa56ac28f5486f49d30"} Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.969404 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba16ee3bbbdec42c3eede95ec6877abc7bac90585c5c2aa56ac28f5486f49d30" Nov 21 14:28:00 crc kubenswrapper[5133]: I1121 14:28:00.969424 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-s6c4j" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.072706 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8"] Nov 21 14:28:01 crc kubenswrapper[5133]: E1121 14:28:01.073133 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="021cf83b-1e98-445e-a69b-7a6509718a2f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.073155 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="021cf83b-1e98-445e-a69b-7a6509718a2f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.073386 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="021cf83b-1e98-445e-a69b-7a6509718a2f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.074047 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.076550 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.076620 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.078114 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.078152 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.079268 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.085674 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8"] Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.209416 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gprjj\" (UniqueName: \"kubernetes.io/projected/1c45492b-4ba6-48e2-996e-807c20ed7852-kube-api-access-gprjj\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.209499 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.209569 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.209733 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.311190 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gprjj\" (UniqueName: \"kubernetes.io/projected/1c45492b-4ba6-48e2-996e-807c20ed7852-kube-api-access-gprjj\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.311259 5133 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.311292 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.312042 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.319893 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.320611 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ceph\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.321440 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.330604 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gprjj\" (UniqueName: \"kubernetes.io/projected/1c45492b-4ba6-48e2-996e-807c20ed7852-kube-api-access-gprjj\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.392119 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.796456 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8"] Nov 21 14:28:01 crc kubenswrapper[5133]: I1121 14:28:01.980061 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" event={"ID":"1c45492b-4ba6-48e2-996e-807c20ed7852","Type":"ContainerStarted","Data":"c3ec2c68052a72332ef82baa5c751d98435d1fde260d0721cf35395e421f2f05"} Nov 21 14:28:02 crc kubenswrapper[5133]: I1121 14:28:02.990760 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" event={"ID":"1c45492b-4ba6-48e2-996e-807c20ed7852","Type":"ContainerStarted","Data":"f99761e68b3c3eab16702322f8976ad0f29d70c40af13746f60389fcd0433153"} Nov 21 14:28:03 crc kubenswrapper[5133]: I1121 14:28:03.014477 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" podStartSLOduration=1.57389554 podStartE2EDuration="2.014461601s" podCreationTimestamp="2025-11-21 14:28:01 +0000 UTC" firstStartedPulling="2025-11-21 14:28:01.807611197 +0000 UTC m=+2741.605443445" lastFinishedPulling="2025-11-21 14:28:02.248177258 +0000 UTC m=+2742.046009506" observedRunningTime="2025-11-21 14:28:03.012816117 +0000 UTC m=+2742.810648375" watchObservedRunningTime="2025-11-21 14:28:03.014461601 +0000 UTC m=+2742.812293849" Nov 21 14:28:07 crc kubenswrapper[5133]: I1121 14:28:07.066956 5133 generic.go:334] "Generic (PLEG): container finished" podID="1c45492b-4ba6-48e2-996e-807c20ed7852" containerID="f99761e68b3c3eab16702322f8976ad0f29d70c40af13746f60389fcd0433153" exitCode=0 Nov 21 14:28:07 crc kubenswrapper[5133]: I1121 14:28:07.067108 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" event={"ID":"1c45492b-4ba6-48e2-996e-807c20ed7852","Type":"ContainerDied","Data":"f99761e68b3c3eab16702322f8976ad0f29d70c40af13746f60389fcd0433153"} Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.499090 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.672178 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-inventory\") pod \"1c45492b-4ba6-48e2-996e-807c20ed7852\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.672265 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ssh-key\") pod \"1c45492b-4ba6-48e2-996e-807c20ed7852\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.672330 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gprjj\" (UniqueName: \"kubernetes.io/projected/1c45492b-4ba6-48e2-996e-807c20ed7852-kube-api-access-gprjj\") pod \"1c45492b-4ba6-48e2-996e-807c20ed7852\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.672380 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ceph\") pod \"1c45492b-4ba6-48e2-996e-807c20ed7852\" (UID: \"1c45492b-4ba6-48e2-996e-807c20ed7852\") " Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.678976 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c45492b-4ba6-48e2-996e-807c20ed7852-kube-api-access-gprjj" (OuterVolumeSpecName: "kube-api-access-gprjj") pod "1c45492b-4ba6-48e2-996e-807c20ed7852" (UID: "1c45492b-4ba6-48e2-996e-807c20ed7852"). InnerVolumeSpecName "kube-api-access-gprjj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.684260 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ceph" (OuterVolumeSpecName: "ceph") pod "1c45492b-4ba6-48e2-996e-807c20ed7852" (UID: "1c45492b-4ba6-48e2-996e-807c20ed7852"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.701550 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-inventory" (OuterVolumeSpecName: "inventory") pod "1c45492b-4ba6-48e2-996e-807c20ed7852" (UID: "1c45492b-4ba6-48e2-996e-807c20ed7852"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.721193 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1c45492b-4ba6-48e2-996e-807c20ed7852" (UID: "1c45492b-4ba6-48e2-996e-807c20ed7852"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.774712 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.774759 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.774779 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1c45492b-4ba6-48e2-996e-807c20ed7852-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:28:08 crc kubenswrapper[5133]: I1121 14:28:08.774801 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gprjj\" (UniqueName: \"kubernetes.io/projected/1c45492b-4ba6-48e2-996e-807c20ed7852-kube-api-access-gprjj\") on node \"crc\" DevicePath \"\"" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.088400 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" event={"ID":"1c45492b-4ba6-48e2-996e-807c20ed7852","Type":"ContainerDied","Data":"c3ec2c68052a72332ef82baa5c751d98435d1fde260d0721cf35395e421f2f05"} Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.088449 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c3ec2c68052a72332ef82baa5c751d98435d1fde260d0721cf35395e421f2f05" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.088492 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.172099 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl"] Nov 21 14:28:09 crc kubenswrapper[5133]: E1121 14:28:09.172574 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c45492b-4ba6-48e2-996e-807c20ed7852" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.172597 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c45492b-4ba6-48e2-996e-807c20ed7852" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.173048 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c45492b-4ba6-48e2-996e-807c20ed7852" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.174215 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.181174 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.181183 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.181183 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.181742 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.182426 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.183273 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl"] Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.284179 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.284263 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.284297 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwg5q\" (UniqueName: \"kubernetes.io/projected/037179d7-f069-49f7-8f2a-9fb94b34064d-kube-api-access-bwg5q\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.284342 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.386697 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.386787 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" 
(UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.386827 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwg5q\" (UniqueName: \"kubernetes.io/projected/037179d7-f069-49f7-8f2a-9fb94b34064d-kube-api-access-bwg5q\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.386885 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.391219 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.391687 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.392808 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ceph\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.418721 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwg5q\" (UniqueName: \"kubernetes.io/projected/037179d7-f069-49f7-8f2a-9fb94b34064d-kube-api-access-bwg5q\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:09 crc kubenswrapper[5133]: I1121 14:28:09.495094 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:28:10 crc kubenswrapper[5133]: I1121 14:28:10.050409 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl"] Nov 21 14:28:10 crc kubenswrapper[5133]: W1121 14:28:10.050920 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod037179d7_f069_49f7_8f2a_9fb94b34064d.slice/crio-201c2c7cbe937c141ab18cdfb6e39f5e3b5d8d9e9b9ab746880fd976987d03ca WatchSource:0}: Error finding container 201c2c7cbe937c141ab18cdfb6e39f5e3b5d8d9e9b9ab746880fd976987d03ca: Status 404 returned error can't find the container with id 201c2c7cbe937c141ab18cdfb6e39f5e3b5d8d9e9b9ab746880fd976987d03ca Nov 21 14:28:10 crc kubenswrapper[5133]: I1121 14:28:10.096358 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" event={"ID":"037179d7-f069-49f7-8f2a-9fb94b34064d","Type":"ContainerStarted","Data":"201c2c7cbe937c141ab18cdfb6e39f5e3b5d8d9e9b9ab746880fd976987d03ca"} Nov 21 14:28:11 crc kubenswrapper[5133]: I1121 14:28:11.106884 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" event={"ID":"037179d7-f069-49f7-8f2a-9fb94b34064d","Type":"ContainerStarted","Data":"831cdae9f27e1801318f9782f6a2e45a3b1d6b628f6d894a52813484d94f7911"} Nov 21 14:28:11 crc kubenswrapper[5133]: I1121 14:28:11.133580 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" podStartSLOduration=1.707373172 podStartE2EDuration="2.133560303s" podCreationTimestamp="2025-11-21 14:28:09 +0000 UTC" firstStartedPulling="2025-11-21 14:28:10.063081943 +0000 UTC m=+2749.860914191" lastFinishedPulling="2025-11-21 14:28:10.489269074 +0000 UTC m=+2750.287101322" observedRunningTime="2025-11-21 14:28:11.128423746 +0000 UTC m=+2750.926255994" watchObservedRunningTime="2025-11-21 14:28:11.133560303 +0000 UTC m=+2750.931392551" Nov 21 14:28:53 crc kubenswrapper[5133]: I1121 14:28:53.311234 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:28:53 crc kubenswrapper[5133]: I1121 14:28:53.311901 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:29:00 crc kubenswrapper[5133]: I1121 14:29:00.192743 5133 generic.go:334] "Generic (PLEG): container finished" podID="037179d7-f069-49f7-8f2a-9fb94b34064d" containerID="831cdae9f27e1801318f9782f6a2e45a3b1d6b628f6d894a52813484d94f7911" exitCode=0 Nov 21 14:29:00 crc kubenswrapper[5133]: I1121 14:29:00.192845 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" event={"ID":"037179d7-f069-49f7-8f2a-9fb94b34064d","Type":"ContainerDied","Data":"831cdae9f27e1801318f9782f6a2e45a3b1d6b628f6d894a52813484d94f7911"} Nov 21 14:29:01 crc kubenswrapper[5133]: I1121 
14:29:01.726219 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:29:01 crc kubenswrapper[5133]: I1121 14:29:01.926723 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-inventory\") pod \"037179d7-f069-49f7-8f2a-9fb94b34064d\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " Nov 21 14:29:01 crc kubenswrapper[5133]: I1121 14:29:01.926822 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ceph\") pod \"037179d7-f069-49f7-8f2a-9fb94b34064d\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " Nov 21 14:29:01 crc kubenswrapper[5133]: I1121 14:29:01.926913 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ssh-key\") pod \"037179d7-f069-49f7-8f2a-9fb94b34064d\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " Nov 21 14:29:01 crc kubenswrapper[5133]: I1121 14:29:01.926964 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwg5q\" (UniqueName: \"kubernetes.io/projected/037179d7-f069-49f7-8f2a-9fb94b34064d-kube-api-access-bwg5q\") pod \"037179d7-f069-49f7-8f2a-9fb94b34064d\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " Nov 21 14:29:01 crc kubenswrapper[5133]: I1121 14:29:01.939113 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ceph" (OuterVolumeSpecName: "ceph") pod "037179d7-f069-49f7-8f2a-9fb94b34064d" (UID: "037179d7-f069-49f7-8f2a-9fb94b34064d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:01 crc kubenswrapper[5133]: I1121 14:29:01.939156 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/037179d7-f069-49f7-8f2a-9fb94b34064d-kube-api-access-bwg5q" (OuterVolumeSpecName: "kube-api-access-bwg5q") pod "037179d7-f069-49f7-8f2a-9fb94b34064d" (UID: "037179d7-f069-49f7-8f2a-9fb94b34064d"). InnerVolumeSpecName "kube-api-access-bwg5q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:29:01 crc kubenswrapper[5133]: E1121 14:29:01.955517 5133 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ssh-key podName:037179d7-f069-49f7-8f2a-9fb94b34064d nodeName:}" failed. No retries permitted until 2025-11-21 14:29:02.45547815 +0000 UTC m=+2802.253310428 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "ssh-key" (UniqueName: "kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ssh-key") pod "037179d7-f069-49f7-8f2a-9fb94b34064d" (UID: "037179d7-f069-49f7-8f2a-9fb94b34064d") : error deleting /var/lib/kubelet/pods/037179d7-f069-49f7-8f2a-9fb94b34064d/volume-subpaths: remove /var/lib/kubelet/pods/037179d7-f069-49f7-8f2a-9fb94b34064d/volume-subpaths: no such file or directory Nov 21 14:29:01 crc kubenswrapper[5133]: I1121 14:29:01.958749 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-inventory" (OuterVolumeSpecName: "inventory") pod "037179d7-f069-49f7-8f2a-9fb94b34064d" (UID: "037179d7-f069-49f7-8f2a-9fb94b34064d"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.029747 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.029804 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.029818 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwg5q\" (UniqueName: \"kubernetes.io/projected/037179d7-f069-49f7-8f2a-9fb94b34064d-kube-api-access-bwg5q\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.215677 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" event={"ID":"037179d7-f069-49f7-8f2a-9fb94b34064d","Type":"ContainerDied","Data":"201c2c7cbe937c141ab18cdfb6e39f5e3b5d8d9e9b9ab746880fd976987d03ca"} Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.215731 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="201c2c7cbe937c141ab18cdfb6e39f5e3b5d8d9e9b9ab746880fd976987d03ca" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.215787 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.302817 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-6kqrk"] Nov 21 14:29:02 crc kubenswrapper[5133]: E1121 14:29:02.303245 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="037179d7-f069-49f7-8f2a-9fb94b34064d" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.303265 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="037179d7-f069-49f7-8f2a-9fb94b34064d" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.303478 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="037179d7-f069-49f7-8f2a-9fb94b34064d" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.304210 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.320414 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-6kqrk"] Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.437455 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.437914 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.438008 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ceph\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.438141 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8cwd\" (UniqueName: \"kubernetes.io/projected/1b73a569-cd93-4222-bd53-77d391457324-kube-api-access-q8cwd\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.539251 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ssh-key\") pod \"037179d7-f069-49f7-8f2a-9fb94b34064d\" (UID: \"037179d7-f069-49f7-8f2a-9fb94b34064d\") " Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.539694 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.539759 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ceph\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.539843 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8cwd\" (UniqueName: \"kubernetes.io/projected/1b73a569-cd93-4222-bd53-77d391457324-kube-api-access-q8cwd\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc 
kubenswrapper[5133]: I1121 14:29:02.539887 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.545956 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.548701 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ceph\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.554304 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.559178 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "037179d7-f069-49f7-8f2a-9fb94b34064d" (UID: "037179d7-f069-49f7-8f2a-9fb94b34064d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.572195 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8cwd\" (UniqueName: \"kubernetes.io/projected/1b73a569-cd93-4222-bd53-77d391457324-kube-api-access-q8cwd\") pod \"ssh-known-hosts-edpm-deployment-6kqrk\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.623086 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:02 crc kubenswrapper[5133]: I1121 14:29:02.641563 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/037179d7-f069-49f7-8f2a-9fb94b34064d-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:03 crc kubenswrapper[5133]: I1121 14:29:03.225859 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-6kqrk"] Nov 21 14:29:03 crc kubenswrapper[5133]: W1121 14:29:03.227636 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b73a569_cd93_4222_bd53_77d391457324.slice/crio-f0028edcf5fff111693d5f6ac9433091619f05ba6b56a524c4b60dda3617436b WatchSource:0}: Error finding container f0028edcf5fff111693d5f6ac9433091619f05ba6b56a524c4b60dda3617436b: Status 404 returned error can't find the container with id f0028edcf5fff111693d5f6ac9433091619f05ba6b56a524c4b60dda3617436b Nov 21 14:29:03 crc kubenswrapper[5133]: I1121 14:29:03.230449 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:29:04 crc kubenswrapper[5133]: I1121 14:29:04.237403 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" event={"ID":"1b73a569-cd93-4222-bd53-77d391457324","Type":"ContainerStarted","Data":"c4831159ec73d92f6a649debda03a12d720de79e4d95e46be19d4fd719a7ec90"} Nov 21 14:29:04 crc kubenswrapper[5133]: I1121 14:29:04.237916 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" event={"ID":"1b73a569-cd93-4222-bd53-77d391457324","Type":"ContainerStarted","Data":"f0028edcf5fff111693d5f6ac9433091619f05ba6b56a524c4b60dda3617436b"} Nov 21 14:29:04 crc kubenswrapper[5133]: I1121 14:29:04.255919 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" podStartSLOduration=1.78786008 podStartE2EDuration="2.255899776s" podCreationTimestamp="2025-11-21 14:29:02 +0000 UTC" firstStartedPulling="2025-11-21 14:29:03.230042959 +0000 UTC m=+2803.027875227" lastFinishedPulling="2025-11-21 14:29:03.698082635 +0000 UTC m=+2803.495914923" observedRunningTime="2025-11-21 14:29:04.252839134 +0000 UTC m=+2804.050671402" watchObservedRunningTime="2025-11-21 14:29:04.255899776 +0000 UTC m=+2804.053732024" Nov 21 14:29:14 crc kubenswrapper[5133]: I1121 14:29:14.344922 5133 generic.go:334] "Generic (PLEG): container finished" podID="1b73a569-cd93-4222-bd53-77d391457324" containerID="c4831159ec73d92f6a649debda03a12d720de79e4d95e46be19d4fd719a7ec90" exitCode=0 Nov 21 14:29:14 crc kubenswrapper[5133]: I1121 14:29:14.345024 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" event={"ID":"1b73a569-cd93-4222-bd53-77d391457324","Type":"ContainerDied","Data":"c4831159ec73d92f6a649debda03a12d720de79e4d95e46be19d4fd719a7ec90"} Nov 21 14:29:15 crc kubenswrapper[5133]: I1121 14:29:15.852196 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:15 crc kubenswrapper[5133]: I1121 14:29:15.899169 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ssh-key-openstack-edpm-ipam\") pod \"1b73a569-cd93-4222-bd53-77d391457324\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " Nov 21 14:29:15 crc kubenswrapper[5133]: I1121 14:29:15.899259 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-inventory-0\") pod \"1b73a569-cd93-4222-bd53-77d391457324\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " Nov 21 14:29:15 crc kubenswrapper[5133]: I1121 14:29:15.899374 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ceph\") pod \"1b73a569-cd93-4222-bd53-77d391457324\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " Nov 21 14:29:15 crc kubenswrapper[5133]: I1121 14:29:15.899445 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8cwd\" (UniqueName: \"kubernetes.io/projected/1b73a569-cd93-4222-bd53-77d391457324-kube-api-access-q8cwd\") pod \"1b73a569-cd93-4222-bd53-77d391457324\" (UID: \"1b73a569-cd93-4222-bd53-77d391457324\") " Nov 21 14:29:15 crc kubenswrapper[5133]: I1121 14:29:15.904979 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ceph" (OuterVolumeSpecName: "ceph") pod "1b73a569-cd93-4222-bd53-77d391457324" (UID: "1b73a569-cd93-4222-bd53-77d391457324"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:15 crc kubenswrapper[5133]: I1121 14:29:15.905794 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b73a569-cd93-4222-bd53-77d391457324-kube-api-access-q8cwd" (OuterVolumeSpecName: "kube-api-access-q8cwd") pod "1b73a569-cd93-4222-bd53-77d391457324" (UID: "1b73a569-cd93-4222-bd53-77d391457324"). InnerVolumeSpecName "kube-api-access-q8cwd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:29:15 crc kubenswrapper[5133]: I1121 14:29:15.924731 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "1b73a569-cd93-4222-bd53-77d391457324" (UID: "1b73a569-cd93-4222-bd53-77d391457324"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:15 crc kubenswrapper[5133]: I1121 14:29:15.927430 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1b73a569-cd93-4222-bd53-77d391457324" (UID: "1b73a569-cd93-4222-bd53-77d391457324"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.001745 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.001781 5133 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.001795 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1b73a569-cd93-4222-bd53-77d391457324-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.001807 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8cwd\" (UniqueName: \"kubernetes.io/projected/1b73a569-cd93-4222-bd53-77d391457324-kube-api-access-q8cwd\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.369913 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" event={"ID":"1b73a569-cd93-4222-bd53-77d391457324","Type":"ContainerDied","Data":"f0028edcf5fff111693d5f6ac9433091619f05ba6b56a524c4b60dda3617436b"} Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.369951 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0028edcf5fff111693d5f6ac9433091619f05ba6b56a524c4b60dda3617436b" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.370129 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-6kqrk" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.470176 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2"] Nov 21 14:29:16 crc kubenswrapper[5133]: E1121 14:29:16.470740 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b73a569-cd93-4222-bd53-77d391457324" containerName="ssh-known-hosts-edpm-deployment" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.470762 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b73a569-cd93-4222-bd53-77d391457324" containerName="ssh-known-hosts-edpm-deployment" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.470983 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b73a569-cd93-4222-bd53-77d391457324" containerName="ssh-known-hosts-edpm-deployment" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.471854 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.474459 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.474762 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.474902 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.475086 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.475918 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.484301 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2"] Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.521422 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.521529 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.521688 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dbg6\" (UniqueName: \"kubernetes.io/projected/060dc158-8f7a-4b81-9f16-ca7258cddeb5-kube-api-access-5dbg6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.521757 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.623307 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dbg6\" (UniqueName: \"kubernetes.io/projected/060dc158-8f7a-4b81-9f16-ca7258cddeb5-kube-api-access-5dbg6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.623691 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.623791 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.623856 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.630409 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ceph\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.631069 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.631285 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.646197 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dbg6\" (UniqueName: \"kubernetes.io/projected/060dc158-8f7a-4b81-9f16-ca7258cddeb5-kube-api-access-5dbg6\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-gm5r2\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:16 crc kubenswrapper[5133]: I1121 14:29:16.834253 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:17 crc kubenswrapper[5133]: I1121 14:29:17.199976 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2"] Nov 21 14:29:17 crc kubenswrapper[5133]: I1121 14:29:17.383925 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" event={"ID":"060dc158-8f7a-4b81-9f16-ca7258cddeb5","Type":"ContainerStarted","Data":"6c2d2953fab6e8a2a7de89be0e8909747b02bc3cc7d63bf0fc69e9e3a1cff47b"} Nov 21 14:29:18 crc kubenswrapper[5133]: I1121 14:29:18.394230 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" event={"ID":"060dc158-8f7a-4b81-9f16-ca7258cddeb5","Type":"ContainerStarted","Data":"1563cafaf3a0a011a0b2f4f0bfc3f8a46fe3933c0c40a924f7b95f7a1d1b4a70"} Nov 21 14:29:18 crc kubenswrapper[5133]: I1121 14:29:18.446795 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" podStartSLOduration=2.01920329 podStartE2EDuration="2.446772655s" podCreationTimestamp="2025-11-21 14:29:16 +0000 UTC" firstStartedPulling="2025-11-21 14:29:17.214084693 +0000 UTC m=+2817.011916941" lastFinishedPulling="2025-11-21 14:29:17.641654028 +0000 UTC m=+2817.439486306" observedRunningTime="2025-11-21 14:29:18.413685424 +0000 UTC m=+2818.211517692" watchObservedRunningTime="2025-11-21 14:29:18.446772655 +0000 UTC m=+2818.244604913" Nov 21 14:29:23 crc kubenswrapper[5133]: I1121 14:29:23.311107 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:29:23 crc kubenswrapper[5133]: I1121 14:29:23.311717 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:29:26 crc kubenswrapper[5133]: I1121 14:29:26.476236 5133 generic.go:334] "Generic (PLEG): container finished" podID="060dc158-8f7a-4b81-9f16-ca7258cddeb5" containerID="1563cafaf3a0a011a0b2f4f0bfc3f8a46fe3933c0c40a924f7b95f7a1d1b4a70" exitCode=0 Nov 21 14:29:26 crc kubenswrapper[5133]: I1121 14:29:26.476333 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" event={"ID":"060dc158-8f7a-4b81-9f16-ca7258cddeb5","Type":"ContainerDied","Data":"1563cafaf3a0a011a0b2f4f0bfc3f8a46fe3933c0c40a924f7b95f7a1d1b4a70"} Nov 21 14:29:27 crc kubenswrapper[5133]: I1121 14:29:27.961703 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.040214 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ssh-key\") pod \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.040411 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ceph\") pod \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.040633 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dbg6\" (UniqueName: \"kubernetes.io/projected/060dc158-8f7a-4b81-9f16-ca7258cddeb5-kube-api-access-5dbg6\") pod \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.040746 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-inventory\") pod \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\" (UID: \"060dc158-8f7a-4b81-9f16-ca7258cddeb5\") " Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.047195 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/060dc158-8f7a-4b81-9f16-ca7258cddeb5-kube-api-access-5dbg6" (OuterVolumeSpecName: "kube-api-access-5dbg6") pod "060dc158-8f7a-4b81-9f16-ca7258cddeb5" (UID: "060dc158-8f7a-4b81-9f16-ca7258cddeb5"). InnerVolumeSpecName "kube-api-access-5dbg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.053971 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ceph" (OuterVolumeSpecName: "ceph") pod "060dc158-8f7a-4b81-9f16-ca7258cddeb5" (UID: "060dc158-8f7a-4b81-9f16-ca7258cddeb5"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.075341 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-inventory" (OuterVolumeSpecName: "inventory") pod "060dc158-8f7a-4b81-9f16-ca7258cddeb5" (UID: "060dc158-8f7a-4b81-9f16-ca7258cddeb5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.075588 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "060dc158-8f7a-4b81-9f16-ca7258cddeb5" (UID: "060dc158-8f7a-4b81-9f16-ca7258cddeb5"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.142864 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.142930 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dbg6\" (UniqueName: \"kubernetes.io/projected/060dc158-8f7a-4b81-9f16-ca7258cddeb5-kube-api-access-5dbg6\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.142954 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.142971 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060dc158-8f7a-4b81-9f16-ca7258cddeb5-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.518696 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" event={"ID":"060dc158-8f7a-4b81-9f16-ca7258cddeb5","Type":"ContainerDied","Data":"6c2d2953fab6e8a2a7de89be0e8909747b02bc3cc7d63bf0fc69e9e3a1cff47b"} Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.518761 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c2d2953fab6e8a2a7de89be0e8909747b02bc3cc7d63bf0fc69e9e3a1cff47b" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.518834 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-gm5r2" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.592739 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx"] Nov 21 14:29:28 crc kubenswrapper[5133]: E1121 14:29:28.593676 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060dc158-8f7a-4b81-9f16-ca7258cddeb5" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.593706 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="060dc158-8f7a-4b81-9f16-ca7258cddeb5" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.593968 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="060dc158-8f7a-4b81-9f16-ca7258cddeb5" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.595048 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.597507 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.597701 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.597832 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.597958 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.598285 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.603051 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx"] Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.656794 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.656938 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.656981 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4lql\" (UniqueName: \"kubernetes.io/projected/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-kube-api-access-n4lql\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.657054 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.758686 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.758873 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.758923 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4lql\" (UniqueName: \"kubernetes.io/projected/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-kube-api-access-n4lql\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.758986 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.764234 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ceph\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.764434 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.765164 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.792939 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4lql\" (UniqueName: \"kubernetes.io/projected/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-kube-api-access-n4lql\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:28 crc kubenswrapper[5133]: I1121 14:29:28.913356 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:29 crc kubenswrapper[5133]: I1121 14:29:29.291121 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx"] Nov 21 14:29:29 crc kubenswrapper[5133]: I1121 14:29:29.528730 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" event={"ID":"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8","Type":"ContainerStarted","Data":"4a762ac4854e27aaaa37c9b21a17938f83c5a127d3f199ba4f2599e7efa3fb20"} Nov 21 14:29:30 crc kubenswrapper[5133]: I1121 14:29:30.538324 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" event={"ID":"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8","Type":"ContainerStarted","Data":"2b7b9a221468d96d33f37bc8d89d45d3e9be39e4deffb65a656f42315de47a8e"} Nov 21 14:29:30 crc kubenswrapper[5133]: I1121 14:29:30.562537 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" podStartSLOduration=2.005301064 podStartE2EDuration="2.562516419s" podCreationTimestamp="2025-11-21 14:29:28 +0000 UTC" firstStartedPulling="2025-11-21 14:29:29.304073493 +0000 UTC m=+2829.101905741" lastFinishedPulling="2025-11-21 14:29:29.861288848 +0000 UTC m=+2829.659121096" observedRunningTime="2025-11-21 14:29:30.560677249 +0000 UTC m=+2830.358509527" watchObservedRunningTime="2025-11-21 14:29:30.562516419 +0000 UTC m=+2830.360348697" Nov 21 14:29:41 crc kubenswrapper[5133]: I1121 14:29:41.662068 5133 generic.go:334] "Generic (PLEG): container finished" podID="8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8" containerID="2b7b9a221468d96d33f37bc8d89d45d3e9be39e4deffb65a656f42315de47a8e" exitCode=0 Nov 21 14:29:41 crc kubenswrapper[5133]: I1121 14:29:41.662206 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" event={"ID":"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8","Type":"ContainerDied","Data":"2b7b9a221468d96d33f37bc8d89d45d3e9be39e4deffb65a656f42315de47a8e"} Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.139215 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.258936 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4lql\" (UniqueName: \"kubernetes.io/projected/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-kube-api-access-n4lql\") pod \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.259312 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ssh-key\") pod \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.259358 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-inventory\") pod \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.259403 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ceph\") pod \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\" (UID: \"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8\") " Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.264879 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ceph" (OuterVolumeSpecName: "ceph") pod "8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8" (UID: "8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.264888 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-kube-api-access-n4lql" (OuterVolumeSpecName: "kube-api-access-n4lql") pod "8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8" (UID: "8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8"). InnerVolumeSpecName "kube-api-access-n4lql". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.283322 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8" (UID: "8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.289456 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-inventory" (OuterVolumeSpecName: "inventory") pod "8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8" (UID: "8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.362661 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.362785 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.362850 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.362923 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4lql\" (UniqueName: \"kubernetes.io/projected/8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8-kube-api-access-n4lql\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.689404 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" event={"ID":"8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8","Type":"ContainerDied","Data":"4a762ac4854e27aaaa37c9b21a17938f83c5a127d3f199ba4f2599e7efa3fb20"} Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.689448 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a762ac4854e27aaaa37c9b21a17938f83c5a127d3f199ba4f2599e7efa3fb20" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.689455 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.789232 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb"] Nov 21 14:29:43 crc kubenswrapper[5133]: E1121 14:29:43.789834 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.789868 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.791183 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.792101 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.794314 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.794578 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.794992 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.803270 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.803381 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.803642 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.803735 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.803816 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.804455 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb"] Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.872324 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.872483 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.872523 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.872550 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: 
\"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.872577 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67dkr\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-kube-api-access-67dkr\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.872615 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.872733 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.872760 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.872793 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.873052 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.873193 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.873271 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.873346 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.975376 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.975731 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.975766 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.975790 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.975823 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.975898 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.975926 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.975948 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.975972 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67dkr\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-kube-api-access-67dkr\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.976040 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.976120 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.976143 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.976171 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc 
kubenswrapper[5133]: I1121 14:29:43.985237 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.987909 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.990262 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.992825 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.996214 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:43 crc kubenswrapper[5133]: I1121 14:29:43.998482 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.000789 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.003343 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ceph\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.005364 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.006417 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.007956 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.008078 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.023592 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67dkr\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-kube-api-access-67dkr\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.120138 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.649488 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb"] Nov 21 14:29:44 crc kubenswrapper[5133]: I1121 14:29:44.699716 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" event={"ID":"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8","Type":"ContainerStarted","Data":"c5fca2943414b1bdd766de49daa263a2c56204f003951cd00287878f8d5f173c"} Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.712692 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" event={"ID":"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8","Type":"ContainerStarted","Data":"f066bdcaa867dc6cb07891f9107cc1d86f27f639947e32c640d0eb0f0e1d6318"} Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.747147 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tr94n"] Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.749441 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.751630 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" podStartSLOduration=2.061029686 podStartE2EDuration="2.75160294s" podCreationTimestamp="2025-11-21 14:29:43 +0000 UTC" firstStartedPulling="2025-11-21 14:29:44.656417198 +0000 UTC m=+2844.454249446" lastFinishedPulling="2025-11-21 14:29:45.346990452 +0000 UTC m=+2845.144822700" observedRunningTime="2025-11-21 14:29:45.734320725 +0000 UTC m=+2845.532152983" watchObservedRunningTime="2025-11-21 14:29:45.75160294 +0000 UTC m=+2845.549435208" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.777136 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tr94n"] Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.816240 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmrnf\" (UniqueName: \"kubernetes.io/projected/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-kube-api-access-nmrnf\") pod \"certified-operators-tr94n\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.816383 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-utilities\") pod \"certified-operators-tr94n\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.816537 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-catalog-content\") pod \"certified-operators-tr94n\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.918426 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-nmrnf\" (UniqueName: \"kubernetes.io/projected/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-kube-api-access-nmrnf\") pod \"certified-operators-tr94n\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.918482 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-utilities\") pod \"certified-operators-tr94n\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.918556 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-catalog-content\") pod \"certified-operators-tr94n\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.919117 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-utilities\") pod \"certified-operators-tr94n\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.919148 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-catalog-content\") pod \"certified-operators-tr94n\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:45 crc kubenswrapper[5133]: I1121 14:29:45.946376 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmrnf\" (UniqueName: \"kubernetes.io/projected/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-kube-api-access-nmrnf\") pod \"certified-operators-tr94n\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:46 crc kubenswrapper[5133]: I1121 14:29:46.078664 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:46 crc kubenswrapper[5133]: I1121 14:29:46.551761 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tr94n"] Nov 21 14:29:46 crc kubenswrapper[5133]: I1121 14:29:46.724678 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tr94n" event={"ID":"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315","Type":"ContainerStarted","Data":"c9554979cb9a57c6a12793898dc55c330805894e084f08e803312eaa395fc66f"} Nov 21 14:29:47 crc kubenswrapper[5133]: I1121 14:29:47.736162 5133 generic.go:334] "Generic (PLEG): container finished" podID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerID="e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a" exitCode=0 Nov 21 14:29:47 crc kubenswrapper[5133]: I1121 14:29:47.736335 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tr94n" event={"ID":"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315","Type":"ContainerDied","Data":"e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a"} Nov 21 14:29:49 crc kubenswrapper[5133]: I1121 14:29:49.758360 5133 generic.go:334] "Generic (PLEG): container finished" podID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerID="866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80" exitCode=0 Nov 21 14:29:49 crc kubenswrapper[5133]: I1121 14:29:49.758850 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tr94n" event={"ID":"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315","Type":"ContainerDied","Data":"866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80"} Nov 21 14:29:50 crc kubenswrapper[5133]: I1121 14:29:50.778540 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tr94n" event={"ID":"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315","Type":"ContainerStarted","Data":"b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a"} Nov 21 14:29:50 crc kubenswrapper[5133]: I1121 14:29:50.808162 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tr94n" podStartSLOduration=3.374617707 podStartE2EDuration="5.808145255s" podCreationTimestamp="2025-11-21 14:29:45 +0000 UTC" firstStartedPulling="2025-11-21 14:29:47.741111239 +0000 UTC m=+2847.538943537" lastFinishedPulling="2025-11-21 14:29:50.174638837 +0000 UTC m=+2849.972471085" observedRunningTime="2025-11-21 14:29:50.797327154 +0000 UTC m=+2850.595159412" watchObservedRunningTime="2025-11-21 14:29:50.808145255 +0000 UTC m=+2850.605977503" Nov 21 14:29:53 crc kubenswrapper[5133]: I1121 14:29:53.310705 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:29:53 crc kubenswrapper[5133]: I1121 14:29:53.311236 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:29:53 crc kubenswrapper[5133]: I1121 14:29:53.311318 5133 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:29:53 crc kubenswrapper[5133]: I1121 14:29:53.312466 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1f1186518ac0102818dd70469c0c9f88a271d9007ab1854097e5d4f6bcfd7ea2"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:29:53 crc kubenswrapper[5133]: I1121 14:29:53.312601 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://1f1186518ac0102818dd70469c0c9f88a271d9007ab1854097e5d4f6bcfd7ea2" gracePeriod=600 Nov 21 14:29:53 crc kubenswrapper[5133]: I1121 14:29:53.809928 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="1f1186518ac0102818dd70469c0c9f88a271d9007ab1854097e5d4f6bcfd7ea2" exitCode=0 Nov 21 14:29:53 crc kubenswrapper[5133]: I1121 14:29:53.809972 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"1f1186518ac0102818dd70469c0c9f88a271d9007ab1854097e5d4f6bcfd7ea2"} Nov 21 14:29:53 crc kubenswrapper[5133]: I1121 14:29:53.810260 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef"} Nov 21 14:29:53 crc kubenswrapper[5133]: I1121 14:29:53.810282 5133 scope.go:117] "RemoveContainer" containerID="c196595136e13d3c4c63ae7c08c87ab795a910aa0f4343c6ca13e03e51955a22" Nov 21 14:29:56 crc kubenswrapper[5133]: I1121 14:29:56.079145 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:56 crc kubenswrapper[5133]: I1121 14:29:56.080688 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:56 crc kubenswrapper[5133]: I1121 14:29:56.152705 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:56 crc kubenswrapper[5133]: I1121 14:29:56.884214 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:56 crc kubenswrapper[5133]: I1121 14:29:56.938134 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tr94n"] Nov 21 14:29:58 crc kubenswrapper[5133]: I1121 14:29:58.855489 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tr94n" podUID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerName="registry-server" containerID="cri-o://b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a" gracePeriod=2 Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.311758 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.399780 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmrnf\" (UniqueName: \"kubernetes.io/projected/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-kube-api-access-nmrnf\") pod \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.399884 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-catalog-content\") pod \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.400157 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-utilities\") pod \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\" (UID: \"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315\") " Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.403589 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-utilities" (OuterVolumeSpecName: "utilities") pod "c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" (UID: "c2e3c10a-3e5d-4bc6-86b9-55342aaa5315"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.407213 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-kube-api-access-nmrnf" (OuterVolumeSpecName: "kube-api-access-nmrnf") pod "c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" (UID: "c2e3c10a-3e5d-4bc6-86b9-55342aaa5315"). InnerVolumeSpecName "kube-api-access-nmrnf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.461548 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" (UID: "c2e3c10a-3e5d-4bc6-86b9-55342aaa5315"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.502927 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmrnf\" (UniqueName: \"kubernetes.io/projected/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-kube-api-access-nmrnf\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.502966 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.502977 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.867435 5133 generic.go:334] "Generic (PLEG): container finished" podID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerID="b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a" exitCode=0 Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.867490 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tr94n" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.867479 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tr94n" event={"ID":"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315","Type":"ContainerDied","Data":"b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a"} Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.867832 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tr94n" event={"ID":"c2e3c10a-3e5d-4bc6-86b9-55342aaa5315","Type":"ContainerDied","Data":"c9554979cb9a57c6a12793898dc55c330805894e084f08e803312eaa395fc66f"} Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.867850 5133 scope.go:117] "RemoveContainer" containerID="b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.897384 5133 scope.go:117] "RemoveContainer" containerID="866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.907893 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tr94n"] Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.915120 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tr94n"] Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.922386 5133 scope.go:117] "RemoveContainer" containerID="e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.963304 5133 scope.go:117] "RemoveContainer" containerID="b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a" Nov 21 14:29:59 crc kubenswrapper[5133]: E1121 14:29:59.963766 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a\": container with ID starting with b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a not found: ID does not exist" containerID="b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.963799 
5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a"} err="failed to get container status \"b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a\": rpc error: code = NotFound desc = could not find container \"b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a\": container with ID starting with b530c174c48fb44f8370fcf3a60a2a78cebebafb81a2013eaaa409024560590a not found: ID does not exist" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.963820 5133 scope.go:117] "RemoveContainer" containerID="866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80" Nov 21 14:29:59 crc kubenswrapper[5133]: E1121 14:29:59.964348 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80\": container with ID starting with 866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80 not found: ID does not exist" containerID="866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.964395 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80"} err="failed to get container status \"866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80\": rpc error: code = NotFound desc = could not find container \"866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80\": container with ID starting with 866735e2eb09be808ea096490b1374224628108988555f1cd7a01d11248d7d80 not found: ID does not exist" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.964431 5133 scope.go:117] "RemoveContainer" containerID="e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a" Nov 21 14:29:59 crc kubenswrapper[5133]: E1121 14:29:59.964708 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a\": container with ID starting with e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a not found: ID does not exist" containerID="e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a" Nov 21 14:29:59 crc kubenswrapper[5133]: I1121 14:29:59.964746 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a"} err="failed to get container status \"e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a\": rpc error: code = NotFound desc = could not find container \"e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a\": container with ID starting with e1402b7818793031c605ef1b81c0bf6cea1f906a2785730f9d894af6cc97648a not found: ID does not exist" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.130554 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq"] Nov 21 14:30:00 crc kubenswrapper[5133]: E1121 14:30:00.131032 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerName="extract-content" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.131054 5133 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerName="extract-content" Nov 21 14:30:00 crc kubenswrapper[5133]: E1121 14:30:00.131289 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerName="registry-server" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.131298 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerName="registry-server" Nov 21 14:30:00 crc kubenswrapper[5133]: E1121 14:30:00.131334 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerName="extract-utilities" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.131341 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerName="extract-utilities" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.131522 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" containerName="registry-server" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.132386 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.134291 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.134886 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.141928 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq"] Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.214701 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/88666366-11ef-4546-b82f-6a71b10bb115-config-volume\") pod \"collect-profiles-29395590-w6qvq\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.214868 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/88666366-11ef-4546-b82f-6a71b10bb115-secret-volume\") pod \"collect-profiles-29395590-w6qvq\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.214929 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h2fv\" (UniqueName: \"kubernetes.io/projected/88666366-11ef-4546-b82f-6a71b10bb115-kube-api-access-7h2fv\") pod \"collect-profiles-29395590-w6qvq\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.316895 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/88666366-11ef-4546-b82f-6a71b10bb115-config-volume\") pod \"collect-profiles-29395590-w6qvq\" (UID: 
\"88666366-11ef-4546-b82f-6a71b10bb115\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.317046 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/88666366-11ef-4546-b82f-6a71b10bb115-secret-volume\") pod \"collect-profiles-29395590-w6qvq\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.317096 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7h2fv\" (UniqueName: \"kubernetes.io/projected/88666366-11ef-4546-b82f-6a71b10bb115-kube-api-access-7h2fv\") pod \"collect-profiles-29395590-w6qvq\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.318104 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/88666366-11ef-4546-b82f-6a71b10bb115-config-volume\") pod \"collect-profiles-29395590-w6qvq\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.324127 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/88666366-11ef-4546-b82f-6a71b10bb115-secret-volume\") pod \"collect-profiles-29395590-w6qvq\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.343142 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h2fv\" (UniqueName: \"kubernetes.io/projected/88666366-11ef-4546-b82f-6a71b10bb115-kube-api-access-7h2fv\") pod \"collect-profiles-29395590-w6qvq\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.450567 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.467530 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2e3c10a-3e5d-4bc6-86b9-55342aaa5315" path="/var/lib/kubelet/pods/c2e3c10a-3e5d-4bc6-86b9-55342aaa5315/volumes" Nov 21 14:30:00 crc kubenswrapper[5133]: I1121 14:30:00.883305 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq"] Nov 21 14:30:01 crc kubenswrapper[5133]: I1121 14:30:01.809246 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-q2sf7"] Nov 21 14:30:01 crc kubenswrapper[5133]: I1121 14:30:01.812342 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:01 crc kubenswrapper[5133]: I1121 14:30:01.827032 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2sf7"] Nov 21 14:30:01 crc kubenswrapper[5133]: I1121 14:30:01.892862 5133 generic.go:334] "Generic (PLEG): container finished" podID="88666366-11ef-4546-b82f-6a71b10bb115" containerID="1a88102269bf6fb70be502741ae33e2e53e8686c4264576273ff0f276370698c" exitCode=0 Nov 21 14:30:01 crc kubenswrapper[5133]: I1121 14:30:01.892908 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" event={"ID":"88666366-11ef-4546-b82f-6a71b10bb115","Type":"ContainerDied","Data":"1a88102269bf6fb70be502741ae33e2e53e8686c4264576273ff0f276370698c"} Nov 21 14:30:01 crc kubenswrapper[5133]: I1121 14:30:01.892939 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" event={"ID":"88666366-11ef-4546-b82f-6a71b10bb115","Type":"ContainerStarted","Data":"853ee2465c4de58795e03cfca7b22410a424f35f2092cac091a8296d21807390"} Nov 21 14:30:01 crc kubenswrapper[5133]: I1121 14:30:01.962712 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-catalog-content\") pod \"redhat-marketplace-q2sf7\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:01 crc kubenswrapper[5133]: I1121 14:30:01.962957 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-utilities\") pod \"redhat-marketplace-q2sf7\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:01 crc kubenswrapper[5133]: I1121 14:30:01.963190 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mvkn\" (UniqueName: \"kubernetes.io/projected/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-kube-api-access-8mvkn\") pod \"redhat-marketplace-q2sf7\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.064846 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-utilities\") pod \"redhat-marketplace-q2sf7\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.064941 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mvkn\" (UniqueName: \"kubernetes.io/projected/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-kube-api-access-8mvkn\") pod \"redhat-marketplace-q2sf7\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.065049 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-catalog-content\") pod \"redhat-marketplace-q2sf7\" (UID: 
\"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.065463 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-catalog-content\") pod \"redhat-marketplace-q2sf7\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.065497 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-utilities\") pod \"redhat-marketplace-q2sf7\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.095024 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mvkn\" (UniqueName: \"kubernetes.io/projected/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-kube-api-access-8mvkn\") pod \"redhat-marketplace-q2sf7\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.213686 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.652623 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2sf7"] Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.904463 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2sf7" event={"ID":"7bd4c555-a0c1-4413-bb1d-6cb004c07f96","Type":"ContainerStarted","Data":"87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244"} Nov 21 14:30:02 crc kubenswrapper[5133]: I1121 14:30:02.905087 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2sf7" event={"ID":"7bd4c555-a0c1-4413-bb1d-6cb004c07f96","Type":"ContainerStarted","Data":"e8dafc6ca263aca1a136915ef2a3e0a29da0086735bf524f3d76a3593e8515db"} Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.274451 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.419363 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/88666366-11ef-4546-b82f-6a71b10bb115-secret-volume\") pod \"88666366-11ef-4546-b82f-6a71b10bb115\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.419421 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/88666366-11ef-4546-b82f-6a71b10bb115-config-volume\") pod \"88666366-11ef-4546-b82f-6a71b10bb115\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.419608 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7h2fv\" (UniqueName: \"kubernetes.io/projected/88666366-11ef-4546-b82f-6a71b10bb115-kube-api-access-7h2fv\") pod \"88666366-11ef-4546-b82f-6a71b10bb115\" (UID: \"88666366-11ef-4546-b82f-6a71b10bb115\") " Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.420924 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88666366-11ef-4546-b82f-6a71b10bb115-config-volume" (OuterVolumeSpecName: "config-volume") pod "88666366-11ef-4546-b82f-6a71b10bb115" (UID: "88666366-11ef-4546-b82f-6a71b10bb115"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.428643 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88666366-11ef-4546-b82f-6a71b10bb115-kube-api-access-7h2fv" (OuterVolumeSpecName: "kube-api-access-7h2fv") pod "88666366-11ef-4546-b82f-6a71b10bb115" (UID: "88666366-11ef-4546-b82f-6a71b10bb115"). InnerVolumeSpecName "kube-api-access-7h2fv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.430792 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88666366-11ef-4546-b82f-6a71b10bb115-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "88666366-11ef-4546-b82f-6a71b10bb115" (UID: "88666366-11ef-4546-b82f-6a71b10bb115"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.521948 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7h2fv\" (UniqueName: \"kubernetes.io/projected/88666366-11ef-4546-b82f-6a71b10bb115-kube-api-access-7h2fv\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.522016 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/88666366-11ef-4546-b82f-6a71b10bb115-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.522036 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/88666366-11ef-4546-b82f-6a71b10bb115-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.914243 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" event={"ID":"88666366-11ef-4546-b82f-6a71b10bb115","Type":"ContainerDied","Data":"853ee2465c4de58795e03cfca7b22410a424f35f2092cac091a8296d21807390"} Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.915834 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="853ee2465c4de58795e03cfca7b22410a424f35f2092cac091a8296d21807390" Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.915858 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2sf7" event={"ID":"7bd4c555-a0c1-4413-bb1d-6cb004c07f96","Type":"ContainerDied","Data":"87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244"} Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.915769 5133 generic.go:334] "Generic (PLEG): container finished" podID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerID="87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244" exitCode=0 Nov 21 14:30:03 crc kubenswrapper[5133]: I1121 14:30:03.914246 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq" Nov 21 14:30:04 crc kubenswrapper[5133]: I1121 14:30:04.353229 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj"] Nov 21 14:30:04 crc kubenswrapper[5133]: I1121 14:30:04.360787 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395545-wrdtj"] Nov 21 14:30:04 crc kubenswrapper[5133]: I1121 14:30:04.509309 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af22fa12-851f-4ec2-81f1-b3df1186e00c" path="/var/lib/kubelet/pods/af22fa12-851f-4ec2-81f1-b3df1186e00c/volumes" Nov 21 14:30:05 crc kubenswrapper[5133]: I1121 14:30:05.935209 5133 generic.go:334] "Generic (PLEG): container finished" podID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerID="69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de" exitCode=0 Nov 21 14:30:05 crc kubenswrapper[5133]: I1121 14:30:05.935388 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2sf7" event={"ID":"7bd4c555-a0c1-4413-bb1d-6cb004c07f96","Type":"ContainerDied","Data":"69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de"} Nov 21 14:30:06 crc kubenswrapper[5133]: I1121 14:30:06.945940 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2sf7" event={"ID":"7bd4c555-a0c1-4413-bb1d-6cb004c07f96","Type":"ContainerStarted","Data":"6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a"} Nov 21 14:30:06 crc kubenswrapper[5133]: I1121 14:30:06.974930 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-q2sf7" podStartSLOduration=3.279707794 podStartE2EDuration="5.974910175s" podCreationTimestamp="2025-11-21 14:30:01 +0000 UTC" firstStartedPulling="2025-11-21 14:30:03.917759813 +0000 UTC m=+2863.715592081" lastFinishedPulling="2025-11-21 14:30:06.612962224 +0000 UTC m=+2866.410794462" observedRunningTime="2025-11-21 14:30:06.963602671 +0000 UTC m=+2866.761434919" watchObservedRunningTime="2025-11-21 14:30:06.974910175 +0000 UTC m=+2866.772742423" Nov 21 14:30:12 crc kubenswrapper[5133]: I1121 14:30:12.214720 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:12 crc kubenswrapper[5133]: I1121 14:30:12.215258 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:12 crc kubenswrapper[5133]: I1121 14:30:12.254375 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:13 crc kubenswrapper[5133]: I1121 14:30:13.061437 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:14 crc kubenswrapper[5133]: I1121 14:30:14.233197 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2sf7"] Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.025527 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-q2sf7" podUID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerName="registry-server" 
containerID="cri-o://6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a" gracePeriod=2 Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.464117 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.628959 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-catalog-content\") pod \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.629466 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-utilities\") pod \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.629763 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mvkn\" (UniqueName: \"kubernetes.io/projected/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-kube-api-access-8mvkn\") pod \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\" (UID: \"7bd4c555-a0c1-4413-bb1d-6cb004c07f96\") " Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.630902 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-utilities" (OuterVolumeSpecName: "utilities") pod "7bd4c555-a0c1-4413-bb1d-6cb004c07f96" (UID: "7bd4c555-a0c1-4413-bb1d-6cb004c07f96"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.635424 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-kube-api-access-8mvkn" (OuterVolumeSpecName: "kube-api-access-8mvkn") pod "7bd4c555-a0c1-4413-bb1d-6cb004c07f96" (UID: "7bd4c555-a0c1-4413-bb1d-6cb004c07f96"). InnerVolumeSpecName "kube-api-access-8mvkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.671089 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7bd4c555-a0c1-4413-bb1d-6cb004c07f96" (UID: "7bd4c555-a0c1-4413-bb1d-6cb004c07f96"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.732869 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.732913 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mvkn\" (UniqueName: \"kubernetes.io/projected/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-kube-api-access-8mvkn\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:15 crc kubenswrapper[5133]: I1121 14:30:15.732944 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7bd4c555-a0c1-4413-bb1d-6cb004c07f96-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.039161 5133 generic.go:334] "Generic (PLEG): container finished" podID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerID="6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a" exitCode=0 Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.039226 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2sf7" event={"ID":"7bd4c555-a0c1-4413-bb1d-6cb004c07f96","Type":"ContainerDied","Data":"6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a"} Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.039259 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-q2sf7" event={"ID":"7bd4c555-a0c1-4413-bb1d-6cb004c07f96","Type":"ContainerDied","Data":"e8dafc6ca263aca1a136915ef2a3e0a29da0086735bf524f3d76a3593e8515db"} Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.039280 5133 scope.go:117] "RemoveContainer" containerID="6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.039285 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-q2sf7" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.083836 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2sf7"] Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.084381 5133 scope.go:117] "RemoveContainer" containerID="69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.091674 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-q2sf7"] Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.103823 5133 scope.go:117] "RemoveContainer" containerID="87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.148915 5133 scope.go:117] "RemoveContainer" containerID="6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a" Nov 21 14:30:16 crc kubenswrapper[5133]: E1121 14:30:16.149544 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a\": container with ID starting with 6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a not found: ID does not exist" containerID="6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.149620 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a"} err="failed to get container status \"6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a\": rpc error: code = NotFound desc = could not find container \"6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a\": container with ID starting with 6742e840c63c58e0ac42d54326f1560a99da42f998c534a4678eddccbd1e2c4a not found: ID does not exist" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.149658 5133 scope.go:117] "RemoveContainer" containerID="69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de" Nov 21 14:30:16 crc kubenswrapper[5133]: E1121 14:30:16.150149 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de\": container with ID starting with 69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de not found: ID does not exist" containerID="69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.150177 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de"} err="failed to get container status \"69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de\": rpc error: code = NotFound desc = could not find container \"69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de\": container with ID starting with 69a95935d75a7323e32104939fb8e7a0087b433f593067fbc188ae470c10c1de not found: ID does not exist" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.150196 5133 scope.go:117] "RemoveContainer" containerID="87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244" Nov 21 14:30:16 crc kubenswrapper[5133]: E1121 14:30:16.150481 5133 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244\": container with ID starting with 87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244 not found: ID does not exist" containerID="87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.150518 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244"} err="failed to get container status \"87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244\": rpc error: code = NotFound desc = could not find container \"87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244\": container with ID starting with 87cb03c768d1f7cb0d272191ca80c428e0c18eaaa2a6ce38ad2000c2400d4244 not found: ID does not exist" Nov 21 14:30:16 crc kubenswrapper[5133]: I1121 14:30:16.472235 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" path="/var/lib/kubelet/pods/7bd4c555-a0c1-4413-bb1d-6cb004c07f96/volumes" Nov 21 14:30:23 crc kubenswrapper[5133]: I1121 14:30:23.109566 5133 generic.go:334] "Generic (PLEG): container finished" podID="d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" containerID="f066bdcaa867dc6cb07891f9107cc1d86f27f639947e32c640d0eb0f0e1d6318" exitCode=0 Nov 21 14:30:23 crc kubenswrapper[5133]: I1121 14:30:23.109674 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" event={"ID":"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8","Type":"ContainerDied","Data":"f066bdcaa867dc6cb07891f9107cc1d86f27f639947e32c640d0eb0f0e1d6318"} Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.520285 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718362 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-neutron-metadata-combined-ca-bundle\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718494 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ceph\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718519 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-libvirt-combined-ca-bundle\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718558 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718576 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718592 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-inventory\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718612 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-nova-combined-ca-bundle\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718689 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-bootstrap-combined-ca-bundle\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718727 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67dkr\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-kube-api-access-67dkr\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 
14:30:24.718761 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ovn-combined-ca-bundle\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718795 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-ovn-default-certs-0\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718830 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ssh-key\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.718867 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-repo-setup-combined-ca-bundle\") pod \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\" (UID: \"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8\") " Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.724955 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ceph" (OuterVolumeSpecName: "ceph") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.725730 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.725884 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.727671 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.727727 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.727945 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.728466 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.728487 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.729005 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.730616 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.738590 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-kube-api-access-67dkr" (OuterVolumeSpecName: "kube-api-access-67dkr") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "kube-api-access-67dkr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.755645 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.757157 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-inventory" (OuterVolumeSpecName: "inventory") pod "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" (UID: "d56ab399-fd3b-41d9-a915-ecf9bd15d5c8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821649 5133 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821687 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821701 5133 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821716 5133 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821733 5133 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821747 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821876 5133 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821894 5133 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821906 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67dkr\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-kube-api-access-67dkr\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc 
kubenswrapper[5133]: I1121 14:30:24.821917 5133 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821930 5133 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821941 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:24 crc kubenswrapper[5133]: I1121 14:30:24.821954 5133 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d56ab399-fd3b-41d9-a915-ecf9bd15d5c8-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.129509 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" event={"ID":"d56ab399-fd3b-41d9-a915-ecf9bd15d5c8","Type":"ContainerDied","Data":"c5fca2943414b1bdd766de49daa263a2c56204f003951cd00287878f8d5f173c"} Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.129827 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5fca2943414b1bdd766de49daa263a2c56204f003951cd00287878f8d5f173c" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.129638 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.251470 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx"] Nov 21 14:30:25 crc kubenswrapper[5133]: E1121 14:30:25.251903 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88666366-11ef-4546-b82f-6a71b10bb115" containerName="collect-profiles" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.251928 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="88666366-11ef-4546-b82f-6a71b10bb115" containerName="collect-profiles" Nov 21 14:30:25 crc kubenswrapper[5133]: E1121 14:30:25.251946 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.251957 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 21 14:30:25 crc kubenswrapper[5133]: E1121 14:30:25.251981 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerName="extract-utilities" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.251990 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerName="extract-utilities" Nov 21 14:30:25 crc kubenswrapper[5133]: E1121 14:30:25.252027 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerName="extract-content" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.252037 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerName="extract-content" Nov 21 14:30:25 crc kubenswrapper[5133]: E1121 14:30:25.252050 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerName="registry-server" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.252058 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerName="registry-server" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.252226 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="88666366-11ef-4546-b82f-6a71b10bb115" containerName="collect-profiles" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.252257 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="d56ab399-fd3b-41d9-a915-ecf9bd15d5c8" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.252272 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bd4c555-a0c1-4413-bb1d-6cb004c07f96" containerName="registry-server" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.252904 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.256880 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.257157 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.257157 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.258239 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.262456 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.264564 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx"] Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.328121 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.328185 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gbdf\" (UniqueName: \"kubernetes.io/projected/95893adb-b1b1-4216-987f-ca92a8a72629-kube-api-access-2gbdf\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.328225 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.328281 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.430706 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.431039 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.431104 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gbdf\" (UniqueName: \"kubernetes.io/projected/95893adb-b1b1-4216-987f-ca92a8a72629-kube-api-access-2gbdf\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.431161 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.436196 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ceph\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.439668 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-inventory\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.445830 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ssh-key\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.459679 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gbdf\" (UniqueName: \"kubernetes.io/projected/95893adb-b1b1-4216-987f-ca92a8a72629-kube-api-access-2gbdf\") pod \"ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:25 crc kubenswrapper[5133]: I1121 14:30:25.575449 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:26 crc kubenswrapper[5133]: I1121 14:30:26.107787 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx"] Nov 21 14:30:26 crc kubenswrapper[5133]: I1121 14:30:26.138680 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" event={"ID":"95893adb-b1b1-4216-987f-ca92a8a72629","Type":"ContainerStarted","Data":"87fb6c812b7272b84c85dcd05a812587fc62ec5d243d740eaf0dbe900ec29431"} Nov 21 14:30:27 crc kubenswrapper[5133]: I1121 14:30:27.148323 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" event={"ID":"95893adb-b1b1-4216-987f-ca92a8a72629","Type":"ContainerStarted","Data":"3030fa0c824f3cacf35e43e46e10cf4ffc663ace13700bc510fa80c7de25fb95"} Nov 21 14:30:27 crc kubenswrapper[5133]: I1121 14:30:27.169036 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" podStartSLOduration=1.660772897 podStartE2EDuration="2.168982394s" podCreationTimestamp="2025-11-21 14:30:25 +0000 UTC" firstStartedPulling="2025-11-21 14:30:26.112537744 +0000 UTC m=+2885.910370042" lastFinishedPulling="2025-11-21 14:30:26.620747291 +0000 UTC m=+2886.418579539" observedRunningTime="2025-11-21 14:30:27.167307619 +0000 UTC m=+2886.965139907" watchObservedRunningTime="2025-11-21 14:30:27.168982394 +0000 UTC m=+2886.966814672" Nov 21 14:30:33 crc kubenswrapper[5133]: I1121 14:30:33.208950 5133 generic.go:334] "Generic (PLEG): container finished" podID="95893adb-b1b1-4216-987f-ca92a8a72629" containerID="3030fa0c824f3cacf35e43e46e10cf4ffc663ace13700bc510fa80c7de25fb95" exitCode=0 Nov 21 14:30:33 crc kubenswrapper[5133]: I1121 14:30:33.209533 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" event={"ID":"95893adb-b1b1-4216-987f-ca92a8a72629","Type":"ContainerDied","Data":"3030fa0c824f3cacf35e43e46e10cf4ffc663ace13700bc510fa80c7de25fb95"} Nov 21 14:30:34 crc kubenswrapper[5133]: I1121 14:30:34.885832 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.040707 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ceph\") pod \"95893adb-b1b1-4216-987f-ca92a8a72629\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.040798 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-inventory\") pod \"95893adb-b1b1-4216-987f-ca92a8a72629\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.040843 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gbdf\" (UniqueName: \"kubernetes.io/projected/95893adb-b1b1-4216-987f-ca92a8a72629-kube-api-access-2gbdf\") pod \"95893adb-b1b1-4216-987f-ca92a8a72629\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.041038 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ssh-key\") pod \"95893adb-b1b1-4216-987f-ca92a8a72629\" (UID: \"95893adb-b1b1-4216-987f-ca92a8a72629\") " Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.047800 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/95893adb-b1b1-4216-987f-ca92a8a72629-kube-api-access-2gbdf" (OuterVolumeSpecName: "kube-api-access-2gbdf") pod "95893adb-b1b1-4216-987f-ca92a8a72629" (UID: "95893adb-b1b1-4216-987f-ca92a8a72629"). InnerVolumeSpecName "kube-api-access-2gbdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.048601 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ceph" (OuterVolumeSpecName: "ceph") pod "95893adb-b1b1-4216-987f-ca92a8a72629" (UID: "95893adb-b1b1-4216-987f-ca92a8a72629"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.071125 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "95893adb-b1b1-4216-987f-ca92a8a72629" (UID: "95893adb-b1b1-4216-987f-ca92a8a72629"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.072586 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-inventory" (OuterVolumeSpecName: "inventory") pod "95893adb-b1b1-4216-987f-ca92a8a72629" (UID: "95893adb-b1b1-4216-987f-ca92a8a72629"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.143474 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.143511 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.143520 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/95893adb-b1b1-4216-987f-ca92a8a72629-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.143531 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gbdf\" (UniqueName: \"kubernetes.io/projected/95893adb-b1b1-4216-987f-ca92a8a72629-kube-api-access-2gbdf\") on node \"crc\" DevicePath \"\"" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.229682 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" event={"ID":"95893adb-b1b1-4216-987f-ca92a8a72629","Type":"ContainerDied","Data":"87fb6c812b7272b84c85dcd05a812587fc62ec5d243d740eaf0dbe900ec29431"} Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.229731 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87fb6c812b7272b84c85dcd05a812587fc62ec5d243d740eaf0dbe900ec29431" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.229811 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.320665 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62"] Nov 21 14:30:35 crc kubenswrapper[5133]: E1121 14:30:35.321163 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95893adb-b1b1-4216-987f-ca92a8a72629" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.321192 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="95893adb-b1b1-4216-987f-ca92a8a72629" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.321410 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="95893adb-b1b1-4216-987f-ca92a8a72629" containerName="ceph-client-edpm-deployment-openstack-edpm-ipam" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.322122 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.326477 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.326905 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.327235 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.327546 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.327782 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.328126 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.356207 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62"] Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.357688 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.357773 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.357831 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s88m\" (UniqueName: \"kubernetes.io/projected/0fec699f-ccdd-498f-8796-2c798f70d259-kube-api-access-5s88m\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.357854 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.357982 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0fec699f-ccdd-498f-8796-2c798f70d259-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc 
kubenswrapper[5133]: I1121 14:30:35.358049 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.459247 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5s88m\" (UniqueName: \"kubernetes.io/projected/0fec699f-ccdd-498f-8796-2c798f70d259-kube-api-access-5s88m\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.459315 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.459535 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0fec699f-ccdd-498f-8796-2c798f70d259-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.459808 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.459911 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.459962 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.461290 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0fec699f-ccdd-498f-8796-2c798f70d259-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.463589 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.464108 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.469712 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.469845 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ceph\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.493260 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s88m\" (UniqueName: \"kubernetes.io/projected/0fec699f-ccdd-498f-8796-2c798f70d259-kube-api-access-5s88m\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-bhf62\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:35 crc kubenswrapper[5133]: I1121 14:30:35.640396 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:30:36 crc kubenswrapper[5133]: I1121 14:30:36.020420 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62"] Nov 21 14:30:36 crc kubenswrapper[5133]: I1121 14:30:36.239266 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" event={"ID":"0fec699f-ccdd-498f-8796-2c798f70d259","Type":"ContainerStarted","Data":"a207f8dca669926ddf772edea8c43d520e84f368bc5d6f7d2e8edee567c84247"} Nov 21 14:30:37 crc kubenswrapper[5133]: I1121 14:30:37.254300 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" event={"ID":"0fec699f-ccdd-498f-8796-2c798f70d259","Type":"ContainerStarted","Data":"b0f49739b027d7189138fe1724ddbf627d342f4405d3f8daf862d713159e0627"} Nov 21 14:30:37 crc kubenswrapper[5133]: I1121 14:30:37.271782 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" podStartSLOduration=1.5803658409999999 podStartE2EDuration="2.271758107s" podCreationTimestamp="2025-11-21 14:30:35 +0000 UTC" firstStartedPulling="2025-11-21 14:30:36.031901401 +0000 UTC m=+2895.829733659" lastFinishedPulling="2025-11-21 14:30:36.723293677 +0000 UTC m=+2896.521125925" observedRunningTime="2025-11-21 14:30:37.27114227 +0000 UTC m=+2897.068974538" watchObservedRunningTime="2025-11-21 14:30:37.271758107 +0000 UTC m=+2897.069590365" Nov 21 14:30:38 crc kubenswrapper[5133]: I1121 14:30:38.201071 5133 scope.go:117] "RemoveContainer" containerID="d20c2728dcc96c4cd1306e3a9967d3012671b156ddf3797e7e37755333282238" Nov 21 14:31:53 crc kubenswrapper[5133]: I1121 14:31:53.310836 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:31:53 crc kubenswrapper[5133]: I1121 14:31:53.311922 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:32:21 crc kubenswrapper[5133]: I1121 14:32:21.357767 5133 generic.go:334] "Generic (PLEG): container finished" podID="0fec699f-ccdd-498f-8796-2c798f70d259" containerID="b0f49739b027d7189138fe1724ddbf627d342f4405d3f8daf862d713159e0627" exitCode=0 Nov 21 14:32:21 crc kubenswrapper[5133]: I1121 14:32:21.357950 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" event={"ID":"0fec699f-ccdd-498f-8796-2c798f70d259","Type":"ContainerDied","Data":"b0f49739b027d7189138fe1724ddbf627d342f4405d3f8daf862d713159e0627"} Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.769663 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.967912 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-inventory\") pod \"0fec699f-ccdd-498f-8796-2c798f70d259\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.967989 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ceph\") pod \"0fec699f-ccdd-498f-8796-2c798f70d259\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.968089 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5s88m\" (UniqueName: \"kubernetes.io/projected/0fec699f-ccdd-498f-8796-2c798f70d259-kube-api-access-5s88m\") pod \"0fec699f-ccdd-498f-8796-2c798f70d259\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.968648 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ssh-key\") pod \"0fec699f-ccdd-498f-8796-2c798f70d259\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.968862 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0fec699f-ccdd-498f-8796-2c798f70d259-ovncontroller-config-0\") pod \"0fec699f-ccdd-498f-8796-2c798f70d259\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.969099 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ovn-combined-ca-bundle\") pod \"0fec699f-ccdd-498f-8796-2c798f70d259\" (UID: \"0fec699f-ccdd-498f-8796-2c798f70d259\") " Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.974054 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ceph" (OuterVolumeSpecName: "ceph") pod "0fec699f-ccdd-498f-8796-2c798f70d259" (UID: "0fec699f-ccdd-498f-8796-2c798f70d259"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.978907 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0fec699f-ccdd-498f-8796-2c798f70d259-kube-api-access-5s88m" (OuterVolumeSpecName: "kube-api-access-5s88m") pod "0fec699f-ccdd-498f-8796-2c798f70d259" (UID: "0fec699f-ccdd-498f-8796-2c798f70d259"). InnerVolumeSpecName "kube-api-access-5s88m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:32:22 crc kubenswrapper[5133]: I1121 14:32:22.989983 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "0fec699f-ccdd-498f-8796-2c798f70d259" (UID: "0fec699f-ccdd-498f-8796-2c798f70d259"). InnerVolumeSpecName "ovn-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.002642 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0fec699f-ccdd-498f-8796-2c798f70d259-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "0fec699f-ccdd-498f-8796-2c798f70d259" (UID: "0fec699f-ccdd-498f-8796-2c798f70d259"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.009259 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0fec699f-ccdd-498f-8796-2c798f70d259" (UID: "0fec699f-ccdd-498f-8796-2c798f70d259"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.022597 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-inventory" (OuterVolumeSpecName: "inventory") pod "0fec699f-ccdd-498f-8796-2c798f70d259" (UID: "0fec699f-ccdd-498f-8796-2c798f70d259"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.071542 5133 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0fec699f-ccdd-498f-8796-2c798f70d259-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.071584 5133 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.071597 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.071608 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.071620 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5s88m\" (UniqueName: \"kubernetes.io/projected/0fec699f-ccdd-498f-8796-2c798f70d259-kube-api-access-5s88m\") on node \"crc\" DevicePath \"\"" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.071630 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0fec699f-ccdd-498f-8796-2c798f70d259-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.311223 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.311334 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.382852 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" event={"ID":"0fec699f-ccdd-498f-8796-2c798f70d259","Type":"ContainerDied","Data":"a207f8dca669926ddf772edea8c43d520e84f368bc5d6f7d2e8edee567c84247"} Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.382900 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a207f8dca669926ddf772edea8c43d520e84f368bc5d6f7d2e8edee567c84247" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.382958 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-bhf62" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.567722 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm"] Nov 21 14:32:23 crc kubenswrapper[5133]: E1121 14:32:23.568221 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0fec699f-ccdd-498f-8796-2c798f70d259" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.568275 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="0fec699f-ccdd-498f-8796-2c798f70d259" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.568431 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="0fec699f-ccdd-498f-8796-2c798f70d259" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.578014 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm"] Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.578181 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.582434 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.582471 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.582701 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.582796 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.582904 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.582938 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.583068 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.681568 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.681627 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.681729 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.681825 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.681878 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.681921 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.681940 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-572zv\" (UniqueName: \"kubernetes.io/projected/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-kube-api-access-572zv\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.783901 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.783945 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.783965 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-572zv\" (UniqueName: \"kubernetes.io/projected/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-kube-api-access-572zv\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.784055 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.784074 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.784120 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.784156 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.789136 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.789243 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ceph\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.789455 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.790170 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.800232 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.801548 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.809527 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-572zv\" (UniqueName: \"kubernetes.io/projected/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-kube-api-access-572zv\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:23 crc kubenswrapper[5133]: I1121 14:32:23.912512 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:32:24 crc kubenswrapper[5133]: I1121 14:32:24.530660 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm"] Nov 21 14:32:25 crc kubenswrapper[5133]: I1121 14:32:25.431739 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" event={"ID":"8d100df8-21d2-4dde-a9cf-25e3ad027bb7","Type":"ContainerStarted","Data":"d944baa27de80bd592ef34952e5b4f713214c6fe7f34723ee8ca6ab2d06e6ffa"} Nov 21 14:32:26 crc kubenswrapper[5133]: I1121 14:32:26.441191 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" event={"ID":"8d100df8-21d2-4dde-a9cf-25e3ad027bb7","Type":"ContainerStarted","Data":"fb4cdc9a6f3209040beb47ca80b41b48fefe455c48ff3d80bfa3288e06d12424"} Nov 21 14:32:26 crc kubenswrapper[5133]: I1121 14:32:26.459858 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" podStartSLOduration=2.563623867 podStartE2EDuration="3.459845097s" podCreationTimestamp="2025-11-21 14:32:23 +0000 UTC" firstStartedPulling="2025-11-21 14:32:24.539267018 +0000 UTC m=+3004.337099266" lastFinishedPulling="2025-11-21 14:32:25.435488248 +0000 UTC m=+3005.233320496" observedRunningTime="2025-11-21 14:32:26.456228149 +0000 UTC m=+3006.254060427" watchObservedRunningTime="2025-11-21 14:32:26.459845097 +0000 UTC m=+3006.257677345" Nov 21 14:32:53 crc kubenswrapper[5133]: I1121 14:32:53.310835 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:32:53 crc kubenswrapper[5133]: I1121 14:32:53.313170 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:32:53 crc kubenswrapper[5133]: I1121 14:32:53.313249 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:32:53 crc kubenswrapper[5133]: I1121 14:32:53.314262 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef"} 
pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:32:53 crc kubenswrapper[5133]: I1121 14:32:53.314368 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" gracePeriod=600 Nov 21 14:32:53 crc kubenswrapper[5133]: E1121 14:32:53.625260 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:32:53 crc kubenswrapper[5133]: I1121 14:32:53.700462 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" exitCode=0 Nov 21 14:32:53 crc kubenswrapper[5133]: I1121 14:32:53.700518 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef"} Nov 21 14:32:53 crc kubenswrapper[5133]: I1121 14:32:53.700556 5133 scope.go:117] "RemoveContainer" containerID="1f1186518ac0102818dd70469c0c9f88a271d9007ab1854097e5d4f6bcfd7ea2" Nov 21 14:32:53 crc kubenswrapper[5133]: I1121 14:32:53.701476 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:32:53 crc kubenswrapper[5133]: E1121 14:32:53.701928 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:33:07 crc kubenswrapper[5133]: I1121 14:33:07.458931 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:33:07 crc kubenswrapper[5133]: E1121 14:33:07.460195 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:33:22 crc kubenswrapper[5133]: I1121 14:33:22.467964 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:33:22 crc kubenswrapper[5133]: E1121 14:33:22.468840 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:33:33 crc kubenswrapper[5133]: I1121 14:33:33.098530 5133 generic.go:334] "Generic (PLEG): container finished" podID="8d100df8-21d2-4dde-a9cf-25e3ad027bb7" containerID="fb4cdc9a6f3209040beb47ca80b41b48fefe455c48ff3d80bfa3288e06d12424" exitCode=0 Nov 21 14:33:33 crc kubenswrapper[5133]: I1121 14:33:33.098623 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" event={"ID":"8d100df8-21d2-4dde-a9cf-25e3ad027bb7","Type":"ContainerDied","Data":"fb4cdc9a6f3209040beb47ca80b41b48fefe455c48ff3d80bfa3288e06d12424"} Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.458376 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:33:34 crc kubenswrapper[5133]: E1121 14:33:34.459341 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.520847 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.594894 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-ovn-metadata-agent-neutron-config-0\") pod \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.595313 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-572zv\" (UniqueName: \"kubernetes.io/projected/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-kube-api-access-572zv\") pod \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.595347 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-inventory\") pod \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.595425 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ceph\") pod \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.595580 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-nova-metadata-neutron-config-0\") pod 
\"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.595663 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-metadata-combined-ca-bundle\") pod \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.595699 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ssh-key\") pod \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\" (UID: \"8d100df8-21d2-4dde-a9cf-25e3ad027bb7\") " Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.603802 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ceph" (OuterVolumeSpecName: "ceph") pod "8d100df8-21d2-4dde-a9cf-25e3ad027bb7" (UID: "8d100df8-21d2-4dde-a9cf-25e3ad027bb7"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.604297 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "8d100df8-21d2-4dde-a9cf-25e3ad027bb7" (UID: "8d100df8-21d2-4dde-a9cf-25e3ad027bb7"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.605854 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-kube-api-access-572zv" (OuterVolumeSpecName: "kube-api-access-572zv") pod "8d100df8-21d2-4dde-a9cf-25e3ad027bb7" (UID: "8d100df8-21d2-4dde-a9cf-25e3ad027bb7"). InnerVolumeSpecName "kube-api-access-572zv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.626992 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "8d100df8-21d2-4dde-a9cf-25e3ad027bb7" (UID: "8d100df8-21d2-4dde-a9cf-25e3ad027bb7"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.627319 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8d100df8-21d2-4dde-a9cf-25e3ad027bb7" (UID: "8d100df8-21d2-4dde-a9cf-25e3ad027bb7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.629264 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "8d100df8-21d2-4dde-a9cf-25e3ad027bb7" (UID: "8d100df8-21d2-4dde-a9cf-25e3ad027bb7"). 
InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.629296 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-inventory" (OuterVolumeSpecName: "inventory") pod "8d100df8-21d2-4dde-a9cf-25e3ad027bb7" (UID: "8d100df8-21d2-4dde-a9cf-25e3ad027bb7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.698603 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-572zv\" (UniqueName: \"kubernetes.io/projected/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-kube-api-access-572zv\") on node \"crc\" DevicePath \"\"" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.698656 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.698669 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.698680 5133 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.698692 5133 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.698705 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:33:34 crc kubenswrapper[5133]: I1121 14:33:34.698715 5133 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/8d100df8-21d2-4dde-a9cf-25e3ad027bb7-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.121544 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" event={"ID":"8d100df8-21d2-4dde-a9cf-25e3ad027bb7","Type":"ContainerDied","Data":"d944baa27de80bd592ef34952e5b4f713214c6fe7f34723ee8ca6ab2d06e6ffa"} Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.121590 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d944baa27de80bd592ef34952e5b4f713214c6fe7f34723ee8ca6ab2d06e6ffa" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.121623 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.229244 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7"] Nov 21 14:33:35 crc kubenswrapper[5133]: E1121 14:33:35.229919 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d100df8-21d2-4dde-a9cf-25e3ad027bb7" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.229954 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d100df8-21d2-4dde-a9cf-25e3ad027bb7" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.230285 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d100df8-21d2-4dde-a9cf-25e3ad027bb7" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.231492 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.235110 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.235409 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.235451 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.235719 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.235759 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.239774 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.244222 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7"] Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.309226 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.309320 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.309409 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.309662 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.309759 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zx6f\" (UniqueName: \"kubernetes.io/projected/7e411878-ed75-43cd-9684-6f5bc9f58f1f-kube-api-access-9zx6f\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.309823 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.411786 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.411879 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.411916 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.411948 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.412061 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ssh-key\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.412109 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zx6f\" (UniqueName: \"kubernetes.io/projected/7e411878-ed75-43cd-9684-6f5bc9f58f1f-kube-api-access-9zx6f\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.415907 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.416177 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.416411 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ceph\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.416970 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.426329 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.428417 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zx6f\" (UniqueName: \"kubernetes.io/projected/7e411878-ed75-43cd-9684-6f5bc9f58f1f-kube-api-access-9zx6f\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-5kst7\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:35 crc kubenswrapper[5133]: I1121 14:33:35.560411 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:33:36 crc kubenswrapper[5133]: I1121 14:33:36.131735 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7"] Nov 21 14:33:37 crc kubenswrapper[5133]: I1121 14:33:37.144369 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" event={"ID":"7e411878-ed75-43cd-9684-6f5bc9f58f1f","Type":"ContainerStarted","Data":"bc7ebb8632065ca0e5d1328b2d111ad05257c467fb01ca19b7f787a108c2d869"} Nov 21 14:33:37 crc kubenswrapper[5133]: I1121 14:33:37.145080 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" event={"ID":"7e411878-ed75-43cd-9684-6f5bc9f58f1f","Type":"ContainerStarted","Data":"6a27296a8c909627ccffbc46a1e4833004aa473ba8e85c80ca6cb1244a69f1eb"} Nov 21 14:33:45 crc kubenswrapper[5133]: I1121 14:33:45.457748 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:33:45 crc kubenswrapper[5133]: E1121 14:33:45.459867 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:33:57 crc kubenswrapper[5133]: I1121 14:33:57.457898 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:33:57 crc kubenswrapper[5133]: E1121 14:33:57.458778 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:34:08 crc kubenswrapper[5133]: I1121 14:34:08.458069 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:34:08 crc kubenswrapper[5133]: E1121 14:34:08.459473 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.404671 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" podStartSLOduration=35.955879906 podStartE2EDuration="36.404646177s" podCreationTimestamp="2025-11-21 14:33:35 +0000 UTC" firstStartedPulling="2025-11-21 14:33:36.145459026 +0000 UTC m=+3075.943291274" lastFinishedPulling="2025-11-21 14:33:36.594225297 +0000 UTC m=+3076.392057545" observedRunningTime="2025-11-21 14:33:37.171518943 +0000 UTC m=+3076.969351191" 
watchObservedRunningTime="2025-11-21 14:34:11.404646177 +0000 UTC m=+3111.202478445" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.427371 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nmtpr"] Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.430059 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.435317 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nmtpr"] Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.559806 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8k5c\" (UniqueName: \"kubernetes.io/projected/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-kube-api-access-m8k5c\") pod \"redhat-operators-nmtpr\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.560058 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-utilities\") pod \"redhat-operators-nmtpr\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.560103 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-catalog-content\") pod \"redhat-operators-nmtpr\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.662106 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-utilities\") pod \"redhat-operators-nmtpr\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.662162 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-catalog-content\") pod \"redhat-operators-nmtpr\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.662255 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8k5c\" (UniqueName: \"kubernetes.io/projected/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-kube-api-access-m8k5c\") pod \"redhat-operators-nmtpr\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.662978 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-catalog-content\") pod \"redhat-operators-nmtpr\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.663081 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-utilities\") pod \"redhat-operators-nmtpr\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.686491 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8k5c\" (UniqueName: \"kubernetes.io/projected/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-kube-api-access-m8k5c\") pod \"redhat-operators-nmtpr\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:11 crc kubenswrapper[5133]: I1121 14:34:11.762129 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:12 crc kubenswrapper[5133]: I1121 14:34:12.234043 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nmtpr"] Nov 21 14:34:12 crc kubenswrapper[5133]: I1121 14:34:12.469962 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nmtpr" event={"ID":"65ca8b5f-0e35-4784-9dbb-a449057dd4cc","Type":"ContainerStarted","Data":"747e3f4a8538525f2d86228de1109ec866ff7cddb9d679eeccdbfb1cd396ce04"} Nov 21 14:34:12 crc kubenswrapper[5133]: I1121 14:34:12.470240 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nmtpr" event={"ID":"65ca8b5f-0e35-4784-9dbb-a449057dd4cc","Type":"ContainerStarted","Data":"588a19fd4110cc6e0301da86b7f830d36cd8ccb1e4bbf2c90435cc2f7dd1e768"} Nov 21 14:34:12 crc kubenswrapper[5133]: I1121 14:34:12.471281 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:34:13 crc kubenswrapper[5133]: I1121 14:34:13.473881 5133 generic.go:334] "Generic (PLEG): container finished" podID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerID="747e3f4a8538525f2d86228de1109ec866ff7cddb9d679eeccdbfb1cd396ce04" exitCode=0 Nov 21 14:34:13 crc kubenswrapper[5133]: I1121 14:34:13.473917 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nmtpr" event={"ID":"65ca8b5f-0e35-4784-9dbb-a449057dd4cc","Type":"ContainerDied","Data":"747e3f4a8538525f2d86228de1109ec866ff7cddb9d679eeccdbfb1cd396ce04"} Nov 21 14:34:13 crc kubenswrapper[5133]: I1121 14:34:13.474284 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nmtpr" event={"ID":"65ca8b5f-0e35-4784-9dbb-a449057dd4cc","Type":"ContainerStarted","Data":"b916b5cbc8dfa313390b7070e5b57e4ac6e96883d8b8445534d950f49d5a02b7"} Nov 21 14:34:14 crc kubenswrapper[5133]: I1121 14:34:14.485742 5133 generic.go:334] "Generic (PLEG): container finished" podID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerID="b916b5cbc8dfa313390b7070e5b57e4ac6e96883d8b8445534d950f49d5a02b7" exitCode=0 Nov 21 14:34:14 crc kubenswrapper[5133]: I1121 14:34:14.485793 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nmtpr" event={"ID":"65ca8b5f-0e35-4784-9dbb-a449057dd4cc","Type":"ContainerDied","Data":"b916b5cbc8dfa313390b7070e5b57e4ac6e96883d8b8445534d950f49d5a02b7"} Nov 21 14:34:16 crc kubenswrapper[5133]: I1121 14:34:16.504167 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nmtpr" 
event={"ID":"65ca8b5f-0e35-4784-9dbb-a449057dd4cc","Type":"ContainerStarted","Data":"c345ec1467f2a9ae5cc1d0b3f94104c1680c9964d4c5d8df92064d2348257c64"} Nov 21 14:34:16 crc kubenswrapper[5133]: I1121 14:34:16.525377 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nmtpr" podStartSLOduration=2.692148789 podStartE2EDuration="5.525360901s" podCreationTimestamp="2025-11-21 14:34:11 +0000 UTC" firstStartedPulling="2025-11-21 14:34:12.471061435 +0000 UTC m=+3112.268893683" lastFinishedPulling="2025-11-21 14:34:15.304273547 +0000 UTC m=+3115.102105795" observedRunningTime="2025-11-21 14:34:16.522895935 +0000 UTC m=+3116.320728203" watchObservedRunningTime="2025-11-21 14:34:16.525360901 +0000 UTC m=+3116.323193149" Nov 21 14:34:19 crc kubenswrapper[5133]: I1121 14:34:19.457899 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:34:19 crc kubenswrapper[5133]: E1121 14:34:19.458981 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:34:21 crc kubenswrapper[5133]: I1121 14:34:21.762309 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:21 crc kubenswrapper[5133]: I1121 14:34:21.762750 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:21 crc kubenswrapper[5133]: I1121 14:34:21.831524 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:22 crc kubenswrapper[5133]: I1121 14:34:22.624674 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:22 crc kubenswrapper[5133]: I1121 14:34:22.676405 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nmtpr"] Nov 21 14:34:24 crc kubenswrapper[5133]: I1121 14:34:24.576488 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nmtpr" podUID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerName="registry-server" containerID="cri-o://c345ec1467f2a9ae5cc1d0b3f94104c1680c9964d4c5d8df92064d2348257c64" gracePeriod=2 Nov 21 14:34:27 crc kubenswrapper[5133]: I1121 14:34:27.608479 5133 generic.go:334] "Generic (PLEG): container finished" podID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerID="c345ec1467f2a9ae5cc1d0b3f94104c1680c9964d4c5d8df92064d2348257c64" exitCode=0 Nov 21 14:34:27 crc kubenswrapper[5133]: I1121 14:34:27.608592 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nmtpr" event={"ID":"65ca8b5f-0e35-4784-9dbb-a449057dd4cc","Type":"ContainerDied","Data":"c345ec1467f2a9ae5cc1d0b3f94104c1680c9964d4c5d8df92064d2348257c64"} Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.786479 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.896430 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8k5c\" (UniqueName: \"kubernetes.io/projected/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-kube-api-access-m8k5c\") pod \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.896543 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-utilities\") pod \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.896632 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-catalog-content\") pod \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\" (UID: \"65ca8b5f-0e35-4784-9dbb-a449057dd4cc\") " Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.898405 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-utilities" (OuterVolumeSpecName: "utilities") pod "65ca8b5f-0e35-4784-9dbb-a449057dd4cc" (UID: "65ca8b5f-0e35-4784-9dbb-a449057dd4cc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.907206 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-kube-api-access-m8k5c" (OuterVolumeSpecName: "kube-api-access-m8k5c") pod "65ca8b5f-0e35-4784-9dbb-a449057dd4cc" (UID: "65ca8b5f-0e35-4784-9dbb-a449057dd4cc"). InnerVolumeSpecName "kube-api-access-m8k5c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.985122 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "65ca8b5f-0e35-4784-9dbb-a449057dd4cc" (UID: "65ca8b5f-0e35-4784-9dbb-a449057dd4cc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.998358 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.998393 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:34:28 crc kubenswrapper[5133]: I1121 14:34:28.998406 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8k5c\" (UniqueName: \"kubernetes.io/projected/65ca8b5f-0e35-4784-9dbb-a449057dd4cc-kube-api-access-m8k5c\") on node \"crc\" DevicePath \"\"" Nov 21 14:34:29 crc kubenswrapper[5133]: I1121 14:34:29.644247 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nmtpr" event={"ID":"65ca8b5f-0e35-4784-9dbb-a449057dd4cc","Type":"ContainerDied","Data":"588a19fd4110cc6e0301da86b7f830d36cd8ccb1e4bbf2c90435cc2f7dd1e768"} Nov 21 14:34:29 crc kubenswrapper[5133]: I1121 14:34:29.644322 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nmtpr" Nov 21 14:34:29 crc kubenswrapper[5133]: I1121 14:34:29.644331 5133 scope.go:117] "RemoveContainer" containerID="c345ec1467f2a9ae5cc1d0b3f94104c1680c9964d4c5d8df92064d2348257c64" Nov 21 14:34:29 crc kubenswrapper[5133]: I1121 14:34:29.682984 5133 scope.go:117] "RemoveContainer" containerID="b916b5cbc8dfa313390b7070e5b57e4ac6e96883d8b8445534d950f49d5a02b7" Nov 21 14:34:29 crc kubenswrapper[5133]: I1121 14:34:29.690609 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nmtpr"] Nov 21 14:34:29 crc kubenswrapper[5133]: I1121 14:34:29.699125 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nmtpr"] Nov 21 14:34:29 crc kubenswrapper[5133]: I1121 14:34:29.725335 5133 scope.go:117] "RemoveContainer" containerID="747e3f4a8538525f2d86228de1109ec866ff7cddb9d679eeccdbfb1cd396ce04" Nov 21 14:34:30 crc kubenswrapper[5133]: I1121 14:34:30.457882 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:34:30 crc kubenswrapper[5133]: E1121 14:34:30.458372 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:34:30 crc kubenswrapper[5133]: I1121 14:34:30.472959 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" path="/var/lib/kubelet/pods/65ca8b5f-0e35-4784-9dbb-a449057dd4cc/volumes" Nov 21 14:34:45 crc kubenswrapper[5133]: I1121 14:34:45.457796 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:34:45 crc kubenswrapper[5133]: E1121 14:34:45.458935 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:34:59 crc kubenswrapper[5133]: I1121 14:34:59.458551 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:34:59 crc kubenswrapper[5133]: E1121 14:34:59.459521 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:35:11 crc kubenswrapper[5133]: I1121 14:35:11.457937 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:35:11 crc kubenswrapper[5133]: E1121 14:35:11.458902 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:35:25 crc kubenswrapper[5133]: I1121 14:35:25.457964 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:35:25 crc kubenswrapper[5133]: E1121 14:35:25.458797 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:35:40 crc kubenswrapper[5133]: I1121 14:35:40.457431 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:35:40 crc kubenswrapper[5133]: E1121 14:35:40.459624 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:35:51 crc kubenswrapper[5133]: I1121 14:35:51.457190 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:35:51 crc kubenswrapper[5133]: E1121 14:35:51.458020 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:36:05 crc kubenswrapper[5133]: I1121 14:36:05.458389 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:36:05 crc kubenswrapper[5133]: E1121 14:36:05.459478 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:36:17 crc kubenswrapper[5133]: I1121 14:36:17.458690 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:36:17 crc kubenswrapper[5133]: E1121 14:36:17.459450 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.298443 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4qd7d"] Nov 21 14:36:22 crc kubenswrapper[5133]: E1121 14:36:22.300144 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerName="extract-content" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.300169 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerName="extract-content" Nov 21 14:36:22 crc kubenswrapper[5133]: E1121 14:36:22.300181 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerName="extract-utilities" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.300188 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerName="extract-utilities" Nov 21 14:36:22 crc kubenswrapper[5133]: E1121 14:36:22.300213 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerName="registry-server" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.300220 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerName="registry-server" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.300645 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="65ca8b5f-0e35-4784-9dbb-a449057dd4cc" containerName="registry-server" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.302235 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.318959 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4qd7d"] Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.365034 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-catalog-content\") pod \"community-operators-4qd7d\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.365190 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-utilities\") pod \"community-operators-4qd7d\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.365245 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k75mn\" (UniqueName: \"kubernetes.io/projected/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-kube-api-access-k75mn\") pod \"community-operators-4qd7d\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.466451 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-catalog-content\") pod \"community-operators-4qd7d\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.466561 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-utilities\") pod \"community-operators-4qd7d\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.466601 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k75mn\" (UniqueName: \"kubernetes.io/projected/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-kube-api-access-k75mn\") pod \"community-operators-4qd7d\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.467706 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-catalog-content\") pod \"community-operators-4qd7d\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.467849 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-utilities\") pod \"community-operators-4qd7d\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.491726 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-k75mn\" (UniqueName: \"kubernetes.io/projected/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-kube-api-access-k75mn\") pod \"community-operators-4qd7d\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:22 crc kubenswrapper[5133]: I1121 14:36:22.627932 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:23 crc kubenswrapper[5133]: I1121 14:36:23.178629 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4qd7d"] Nov 21 14:36:23 crc kubenswrapper[5133]: I1121 14:36:23.193392 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qd7d" event={"ID":"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81","Type":"ContainerStarted","Data":"42c87e853440d1c0d0223a74e1a79f22683e13bc6a2a157bac12ccb8b190572e"} Nov 21 14:36:24 crc kubenswrapper[5133]: I1121 14:36:24.205073 5133 generic.go:334] "Generic (PLEG): container finished" podID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerID="498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc" exitCode=0 Nov 21 14:36:24 crc kubenswrapper[5133]: I1121 14:36:24.205166 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qd7d" event={"ID":"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81","Type":"ContainerDied","Data":"498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc"} Nov 21 14:36:26 crc kubenswrapper[5133]: I1121 14:36:26.226022 5133 generic.go:334] "Generic (PLEG): container finished" podID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerID="ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9" exitCode=0 Nov 21 14:36:26 crc kubenswrapper[5133]: I1121 14:36:26.226116 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qd7d" event={"ID":"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81","Type":"ContainerDied","Data":"ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9"} Nov 21 14:36:27 crc kubenswrapper[5133]: I1121 14:36:27.239076 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qd7d" event={"ID":"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81","Type":"ContainerStarted","Data":"336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb"} Nov 21 14:36:27 crc kubenswrapper[5133]: I1121 14:36:27.269419 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4qd7d" podStartSLOduration=2.47305824 podStartE2EDuration="5.269399036s" podCreationTimestamp="2025-11-21 14:36:22 +0000 UTC" firstStartedPulling="2025-11-21 14:36:24.207510241 +0000 UTC m=+3244.005342489" lastFinishedPulling="2025-11-21 14:36:27.003851037 +0000 UTC m=+3246.801683285" observedRunningTime="2025-11-21 14:36:27.266860538 +0000 UTC m=+3247.064692786" watchObservedRunningTime="2025-11-21 14:36:27.269399036 +0000 UTC m=+3247.067231284" Nov 21 14:36:31 crc kubenswrapper[5133]: I1121 14:36:31.458684 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:36:31 crc kubenswrapper[5133]: E1121 14:36:31.460048 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:36:32 crc kubenswrapper[5133]: I1121 14:36:32.628967 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:32 crc kubenswrapper[5133]: I1121 14:36:32.629406 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:32 crc kubenswrapper[5133]: I1121 14:36:32.676852 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:33 crc kubenswrapper[5133]: I1121 14:36:33.347848 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:33 crc kubenswrapper[5133]: I1121 14:36:33.397272 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4qd7d"] Nov 21 14:36:35 crc kubenswrapper[5133]: I1121 14:36:35.312569 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4qd7d" podUID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerName="registry-server" containerID="cri-o://336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb" gracePeriod=2 Nov 21 14:36:35 crc kubenswrapper[5133]: I1121 14:36:35.771986 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:35 crc kubenswrapper[5133]: I1121 14:36:35.921643 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k75mn\" (UniqueName: \"kubernetes.io/projected/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-kube-api-access-k75mn\") pod \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " Nov 21 14:36:35 crc kubenswrapper[5133]: I1121 14:36:35.921690 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-catalog-content\") pod \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " Nov 21 14:36:35 crc kubenswrapper[5133]: I1121 14:36:35.921813 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-utilities\") pod \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\" (UID: \"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81\") " Nov 21 14:36:35 crc kubenswrapper[5133]: I1121 14:36:35.922981 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-utilities" (OuterVolumeSpecName: "utilities") pod "5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" (UID: "5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:36:35 crc kubenswrapper[5133]: I1121 14:36:35.931291 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-kube-api-access-k75mn" (OuterVolumeSpecName: "kube-api-access-k75mn") pod "5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" (UID: "5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81"). InnerVolumeSpecName "kube-api-access-k75mn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.004134 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" (UID: "5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.023723 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.023767 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k75mn\" (UniqueName: \"kubernetes.io/projected/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-kube-api-access-k75mn\") on node \"crc\" DevicePath \"\"" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.023781 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.339760 5133 generic.go:334] "Generic (PLEG): container finished" podID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerID="336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb" exitCode=0 Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.339825 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qd7d" event={"ID":"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81","Type":"ContainerDied","Data":"336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb"} Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.339873 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4qd7d" event={"ID":"5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81","Type":"ContainerDied","Data":"42c87e853440d1c0d0223a74e1a79f22683e13bc6a2a157bac12ccb8b190572e"} Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.339894 5133 scope.go:117] "RemoveContainer" containerID="336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.340115 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4qd7d" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.384602 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4qd7d"] Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.388300 5133 scope.go:117] "RemoveContainer" containerID="ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.394056 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4qd7d"] Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.430383 5133 scope.go:117] "RemoveContainer" containerID="498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.455707 5133 scope.go:117] "RemoveContainer" containerID="336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb" Nov 21 14:36:36 crc kubenswrapper[5133]: E1121 14:36:36.456284 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb\": container with ID starting with 336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb not found: ID does not exist" containerID="336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.456313 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb"} err="failed to get container status \"336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb\": rpc error: code = NotFound desc = could not find container \"336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb\": container with ID starting with 336b8636bd22a3fae2b26eb5d8a04e513eec0e6eb275e168751db8117f14badb not found: ID does not exist" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.456339 5133 scope.go:117] "RemoveContainer" containerID="ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9" Nov 21 14:36:36 crc kubenswrapper[5133]: E1121 14:36:36.456812 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9\": container with ID starting with ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9 not found: ID does not exist" containerID="ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.456873 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9"} err="failed to get container status \"ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9\": rpc error: code = NotFound desc = could not find container \"ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9\": container with ID starting with ab3e841df561dc60030d087d795fd83aa2d6144a9c9dc815ea0f9f57d82af2b9 not found: ID does not exist" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.456905 5133 scope.go:117] "RemoveContainer" containerID="498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc" Nov 21 14:36:36 crc kubenswrapper[5133]: E1121 14:36:36.457555 5133 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc\": container with ID starting with 498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc not found: ID does not exist" containerID="498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.457603 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc"} err="failed to get container status \"498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc\": rpc error: code = NotFound desc = could not find container \"498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc\": container with ID starting with 498bc9d998ccf29a901606b33024c1b7f1977018bd3a7e442c86ff96953e2dbc not found: ID does not exist" Nov 21 14:36:36 crc kubenswrapper[5133]: I1121 14:36:36.469257 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" path="/var/lib/kubelet/pods/5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81/volumes" Nov 21 14:36:45 crc kubenswrapper[5133]: I1121 14:36:45.458546 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:36:45 crc kubenswrapper[5133]: E1121 14:36:45.459650 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:36:56 crc kubenswrapper[5133]: I1121 14:36:56.458286 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:36:56 crc kubenswrapper[5133]: E1121 14:36:56.459207 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:37:09 crc kubenswrapper[5133]: I1121 14:37:09.458641 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:37:09 crc kubenswrapper[5133]: E1121 14:37:09.459524 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:37:23 crc kubenswrapper[5133]: I1121 14:37:23.457315 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:37:23 crc kubenswrapper[5133]: E1121 14:37:23.458137 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:37:35 crc kubenswrapper[5133]: I1121 14:37:35.457422 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:37:35 crc kubenswrapper[5133]: E1121 14:37:35.458565 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:37:47 crc kubenswrapper[5133]: I1121 14:37:47.458217 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:37:47 crc kubenswrapper[5133]: E1121 14:37:47.459035 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:38:01 crc kubenswrapper[5133]: I1121 14:38:01.457864 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:38:02 crc kubenswrapper[5133]: I1121 14:38:02.202219 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"142b6de6b55e03cb42254cbecc2c0c858a38432be696d3f2b827d387bb6c86eb"} Nov 21 14:39:18 crc kubenswrapper[5133]: I1121 14:39:18.958388 5133 generic.go:334] "Generic (PLEG): container finished" podID="7e411878-ed75-43cd-9684-6f5bc9f58f1f" containerID="bc7ebb8632065ca0e5d1328b2d111ad05257c467fb01ca19b7f787a108c2d869" exitCode=0 Nov 21 14:39:18 crc kubenswrapper[5133]: I1121 14:39:18.958519 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" event={"ID":"7e411878-ed75-43cd-9684-6f5bc9f58f1f","Type":"ContainerDied","Data":"bc7ebb8632065ca0e5d1328b2d111ad05257c467fb01ca19b7f787a108c2d869"} Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.348310 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.356665 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ssh-key\") pod \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.357388 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-inventory\") pod \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.357529 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-secret-0\") pod \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.357628 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ceph\") pod \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.357717 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-combined-ca-bundle\") pod \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.357789 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9zx6f\" (UniqueName: \"kubernetes.io/projected/7e411878-ed75-43cd-9684-6f5bc9f58f1f-kube-api-access-9zx6f\") pod \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\" (UID: \"7e411878-ed75-43cd-9684-6f5bc9f58f1f\") " Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.364168 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ceph" (OuterVolumeSpecName: "ceph") pod "7e411878-ed75-43cd-9684-6f5bc9f58f1f" (UID: "7e411878-ed75-43cd-9684-6f5bc9f58f1f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.368939 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e411878-ed75-43cd-9684-6f5bc9f58f1f-kube-api-access-9zx6f" (OuterVolumeSpecName: "kube-api-access-9zx6f") pod "7e411878-ed75-43cd-9684-6f5bc9f58f1f" (UID: "7e411878-ed75-43cd-9684-6f5bc9f58f1f"). InnerVolumeSpecName "kube-api-access-9zx6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.381797 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "7e411878-ed75-43cd-9684-6f5bc9f58f1f" (UID: "7e411878-ed75-43cd-9684-6f5bc9f58f1f"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.393585 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-inventory" (OuterVolumeSpecName: "inventory") pod "7e411878-ed75-43cd-9684-6f5bc9f58f1f" (UID: "7e411878-ed75-43cd-9684-6f5bc9f58f1f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.394746 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7e411878-ed75-43cd-9684-6f5bc9f58f1f" (UID: "7e411878-ed75-43cd-9684-6f5bc9f58f1f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.396901 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "7e411878-ed75-43cd-9684-6f5bc9f58f1f" (UID: "7e411878-ed75-43cd-9684-6f5bc9f58f1f"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.460488 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.460638 5133 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.460696 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9zx6f\" (UniqueName: \"kubernetes.io/projected/7e411878-ed75-43cd-9684-6f5bc9f58f1f-kube-api-access-9zx6f\") on node \"crc\" DevicePath \"\"" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.460775 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.460829 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.460903 5133 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/7e411878-ed75-43cd-9684-6f5bc9f58f1f-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.983906 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" event={"ID":"7e411878-ed75-43cd-9684-6f5bc9f58f1f","Type":"ContainerDied","Data":"6a27296a8c909627ccffbc46a1e4833004aa473ba8e85c80ca6cb1244a69f1eb"} Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.983973 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a27296a8c909627ccffbc46a1e4833004aa473ba8e85c80ca6cb1244a69f1eb" Nov 21 14:39:20 crc kubenswrapper[5133]: I1121 14:39:20.983975 5133 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-5kst7" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.084289 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb"] Nov 21 14:39:21 crc kubenswrapper[5133]: E1121 14:39:21.084931 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerName="registry-server" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.085026 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerName="registry-server" Nov 21 14:39:21 crc kubenswrapper[5133]: E1121 14:39:21.085096 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerName="extract-content" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.085147 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerName="extract-content" Nov 21 14:39:21 crc kubenswrapper[5133]: E1121 14:39:21.085203 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e411878-ed75-43cd-9684-6f5bc9f58f1f" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.085287 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e411878-ed75-43cd-9684-6f5bc9f58f1f" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 21 14:39:21 crc kubenswrapper[5133]: E1121 14:39:21.085348 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerName="extract-utilities" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.085400 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerName="extract-utilities" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.085612 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d99ec39-1e1d-4ca6-b14a-1fb0cef37c81" containerName="registry-server" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.085693 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e411878-ed75-43cd-9684-6f5bc9f58f1f" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.086545 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.088623 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.088629 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ceph-nova" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.088766 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-ztmk4" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.088924 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.089187 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.089519 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.089587 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.090094 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.090120 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.100024 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb"] Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.172581 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.172646 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6r24h\" (UniqueName: \"kubernetes.io/projected/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-kube-api-access-6r24h\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.172721 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.172760 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-extra-config-0\") pod 
\"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.172779 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.172810 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.173061 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.173154 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.173187 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.173210 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.173237 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc 
kubenswrapper[5133]: I1121 14:39:21.275318 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275410 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275442 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275462 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275481 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275677 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275715 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6r24h\" (UniqueName: \"kubernetes.io/projected/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-kube-api-access-6r24h\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275749 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " 
pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275782 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275800 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.275829 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.277845 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-extra-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.278052 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph-nova-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.280358 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ssh-key\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.280958 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-inventory\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.281587 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: 
\"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.281670 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.281940 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-custom-ceph-combined-ca-bundle\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.282779 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.283924 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-1\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.287332 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-0\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.297620 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6r24h\" (UniqueName: \"kubernetes.io/projected/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-kube-api-access-6r24h\") pod \"nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.403773 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.970168 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb"] Nov 21 14:39:21 crc kubenswrapper[5133]: I1121 14:39:21.975388 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:39:22 crc kubenswrapper[5133]: I1121 14:39:22.011805 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" event={"ID":"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e","Type":"ContainerStarted","Data":"bb00d1c3dc36891d8fd71891cd32db3dc4a45824a484f7261eb225341fd617a3"} Nov 21 14:39:23 crc kubenswrapper[5133]: I1121 14:39:23.689433 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 21 14:39:25 crc kubenswrapper[5133]: I1121 14:39:25.037507 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" event={"ID":"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e","Type":"ContainerStarted","Data":"d2cd926b83d94a0b4a165949f1f113f238c2030026c38d067099890b05f2feee"} Nov 21 14:39:25 crc kubenswrapper[5133]: I1121 14:39:25.064793 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" podStartSLOduration=2.353866226 podStartE2EDuration="4.064773683s" podCreationTimestamp="2025-11-21 14:39:21 +0000 UTC" firstStartedPulling="2025-11-21 14:39:21.975183921 +0000 UTC m=+3421.773016169" lastFinishedPulling="2025-11-21 14:39:23.686091348 +0000 UTC m=+3423.483923626" observedRunningTime="2025-11-21 14:39:25.057701943 +0000 UTC m=+3424.855534191" watchObservedRunningTime="2025-11-21 14:39:25.064773683 +0000 UTC m=+3424.862605931" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.045769 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r5776"] Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.049423 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.062509 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r5776"] Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.161592 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-catalog-content\") pod \"certified-operators-r5776\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.161647 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-utilities\") pod \"certified-operators-r5776\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.161727 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kff97\" (UniqueName: \"kubernetes.io/projected/920b19be-7ba1-49e5-a986-f6c9ef724a12-kube-api-access-kff97\") pod \"certified-operators-r5776\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.263654 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-catalog-content\") pod \"certified-operators-r5776\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.264122 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-utilities\") pod \"certified-operators-r5776\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.264279 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kff97\" (UniqueName: \"kubernetes.io/projected/920b19be-7ba1-49e5-a986-f6c9ef724a12-kube-api-access-kff97\") pod \"certified-operators-r5776\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.264599 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-utilities\") pod \"certified-operators-r5776\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.264608 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-catalog-content\") pod \"certified-operators-r5776\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.289228 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kff97\" (UniqueName: \"kubernetes.io/projected/920b19be-7ba1-49e5-a986-f6c9ef724a12-kube-api-access-kff97\") pod \"certified-operators-r5776\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.419819 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:02 crc kubenswrapper[5133]: I1121 14:40:02.945889 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r5776"] Nov 21 14:40:02 crc kubenswrapper[5133]: W1121 14:40:02.952187 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod920b19be_7ba1_49e5_a986_f6c9ef724a12.slice/crio-a3cde73426064c5e7d2d385110ca452f4363d9754bc242c33e48af0d49d4104a WatchSource:0}: Error finding container a3cde73426064c5e7d2d385110ca452f4363d9754bc242c33e48af0d49d4104a: Status 404 returned error can't find the container with id a3cde73426064c5e7d2d385110ca452f4363d9754bc242c33e48af0d49d4104a Nov 21 14:40:03 crc kubenswrapper[5133]: I1121 14:40:03.412035 5133 generic.go:334] "Generic (PLEG): container finished" podID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerID="9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58" exitCode=0 Nov 21 14:40:03 crc kubenswrapper[5133]: I1121 14:40:03.412133 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5776" event={"ID":"920b19be-7ba1-49e5-a986-f6c9ef724a12","Type":"ContainerDied","Data":"9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58"} Nov 21 14:40:03 crc kubenswrapper[5133]: I1121 14:40:03.412376 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5776" event={"ID":"920b19be-7ba1-49e5-a986-f6c9ef724a12","Type":"ContainerStarted","Data":"a3cde73426064c5e7d2d385110ca452f4363d9754bc242c33e48af0d49d4104a"} Nov 21 14:40:07 crc kubenswrapper[5133]: E1121 14:40:07.060154 5133 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod920b19be_7ba1_49e5_a986_f6c9ef724a12.slice/crio-conmon-9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod920b19be_7ba1_49e5_a986_f6c9ef724a12.slice/crio-9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2.scope\": RecentStats: unable to find data in memory cache]" Nov 21 14:40:07 crc kubenswrapper[5133]: I1121 14:40:07.460557 5133 generic.go:334] "Generic (PLEG): container finished" podID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerID="9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2" exitCode=0 Nov 21 14:40:07 crc kubenswrapper[5133]: I1121 14:40:07.460615 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5776" event={"ID":"920b19be-7ba1-49e5-a986-f6c9ef724a12","Type":"ContainerDied","Data":"9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2"} Nov 21 14:40:08 crc kubenswrapper[5133]: I1121 14:40:08.471517 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5776" 
event={"ID":"920b19be-7ba1-49e5-a986-f6c9ef724a12","Type":"ContainerStarted","Data":"c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2"} Nov 21 14:40:08 crc kubenswrapper[5133]: I1121 14:40:08.491053 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r5776" podStartSLOduration=1.754825792 podStartE2EDuration="6.491026076s" podCreationTimestamp="2025-11-21 14:40:02 +0000 UTC" firstStartedPulling="2025-11-21 14:40:03.413560227 +0000 UTC m=+3463.211392475" lastFinishedPulling="2025-11-21 14:40:08.149760511 +0000 UTC m=+3467.947592759" observedRunningTime="2025-11-21 14:40:08.48706133 +0000 UTC m=+3468.284893598" watchObservedRunningTime="2025-11-21 14:40:08.491026076 +0000 UTC m=+3468.288858354" Nov 21 14:40:12 crc kubenswrapper[5133]: I1121 14:40:12.420296 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:12 crc kubenswrapper[5133]: I1121 14:40:12.420775 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:12 crc kubenswrapper[5133]: I1121 14:40:12.514476 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:22 crc kubenswrapper[5133]: I1121 14:40:22.486594 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:22 crc kubenswrapper[5133]: I1121 14:40:22.547516 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r5776"] Nov 21 14:40:22 crc kubenswrapper[5133]: I1121 14:40:22.624339 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r5776" podUID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerName="registry-server" containerID="cri-o://c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2" gracePeriod=2 Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.213667 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.283747 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-utilities\") pod \"920b19be-7ba1-49e5-a986-f6c9ef724a12\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.283873 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kff97\" (UniqueName: \"kubernetes.io/projected/920b19be-7ba1-49e5-a986-f6c9ef724a12-kube-api-access-kff97\") pod \"920b19be-7ba1-49e5-a986-f6c9ef724a12\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.284009 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-catalog-content\") pod \"920b19be-7ba1-49e5-a986-f6c9ef724a12\" (UID: \"920b19be-7ba1-49e5-a986-f6c9ef724a12\") " Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.284860 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-utilities" (OuterVolumeSpecName: "utilities") pod "920b19be-7ba1-49e5-a986-f6c9ef724a12" (UID: "920b19be-7ba1-49e5-a986-f6c9ef724a12"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.289450 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/920b19be-7ba1-49e5-a986-f6c9ef724a12-kube-api-access-kff97" (OuterVolumeSpecName: "kube-api-access-kff97") pod "920b19be-7ba1-49e5-a986-f6c9ef724a12" (UID: "920b19be-7ba1-49e5-a986-f6c9ef724a12"). InnerVolumeSpecName "kube-api-access-kff97". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.310596 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.310683 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.329312 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "920b19be-7ba1-49e5-a986-f6c9ef724a12" (UID: "920b19be-7ba1-49e5-a986-f6c9ef724a12"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.385875 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.385924 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kff97\" (UniqueName: \"kubernetes.io/projected/920b19be-7ba1-49e5-a986-f6c9ef724a12-kube-api-access-kff97\") on node \"crc\" DevicePath \"\"" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.385937 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/920b19be-7ba1-49e5-a986-f6c9ef724a12-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.636121 5133 generic.go:334] "Generic (PLEG): container finished" podID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerID="c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2" exitCode=0 Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.636169 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5776" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.636188 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5776" event={"ID":"920b19be-7ba1-49e5-a986-f6c9ef724a12","Type":"ContainerDied","Data":"c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2"} Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.636633 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5776" event={"ID":"920b19be-7ba1-49e5-a986-f6c9ef724a12","Type":"ContainerDied","Data":"a3cde73426064c5e7d2d385110ca452f4363d9754bc242c33e48af0d49d4104a"} Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.636650 5133 scope.go:117] "RemoveContainer" containerID="c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.671900 5133 scope.go:117] "RemoveContainer" containerID="9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.673783 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r5776"] Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.680904 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r5776"] Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.699076 5133 scope.go:117] "RemoveContainer" containerID="9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.744143 5133 scope.go:117] "RemoveContainer" containerID="c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2" Nov 21 14:40:23 crc kubenswrapper[5133]: E1121 14:40:23.744627 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2\": container with ID starting with c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2 not found: ID does not exist" containerID="c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.744673 
5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2"} err="failed to get container status \"c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2\": rpc error: code = NotFound desc = could not find container \"c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2\": container with ID starting with c583df60e66ec84c277470f5063200eb6e2e727fa24b3ec3ad4099a91755a9a2 not found: ID does not exist" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.744702 5133 scope.go:117] "RemoveContainer" containerID="9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2" Nov 21 14:40:23 crc kubenswrapper[5133]: E1121 14:40:23.745494 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2\": container with ID starting with 9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2 not found: ID does not exist" containerID="9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.745530 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2"} err="failed to get container status \"9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2\": rpc error: code = NotFound desc = could not find container \"9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2\": container with ID starting with 9e4051e09631f040c82c8dbe874d050d8545a4967af66dcb91d967968cf7b2f2 not found: ID does not exist" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.745555 5133 scope.go:117] "RemoveContainer" containerID="9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58" Nov 21 14:40:23 crc kubenswrapper[5133]: E1121 14:40:23.746548 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58\": container with ID starting with 9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58 not found: ID does not exist" containerID="9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58" Nov 21 14:40:23 crc kubenswrapper[5133]: I1121 14:40:23.746620 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58"} err="failed to get container status \"9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58\": rpc error: code = NotFound desc = could not find container \"9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58\": container with ID starting with 9c16df51279292225dcff4c4422094714f6b7677f8eda86aab32ff3f6ed4de58 not found: ID does not exist" Nov 21 14:40:24 crc kubenswrapper[5133]: I1121 14:40:24.476557 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="920b19be-7ba1-49e5-a986-f6c9ef724a12" path="/var/lib/kubelet/pods/920b19be-7ba1-49e5-a986-f6c9ef724a12/volumes" Nov 21 14:40:53 crc kubenswrapper[5133]: I1121 14:40:53.311078 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:40:53 crc kubenswrapper[5133]: I1121 14:40:53.311742 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:41:23 crc kubenswrapper[5133]: I1121 14:41:23.310486 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:41:23 crc kubenswrapper[5133]: I1121 14:41:23.311390 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:41:23 crc kubenswrapper[5133]: I1121 14:41:23.311487 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:41:23 crc kubenswrapper[5133]: I1121 14:41:23.312655 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"142b6de6b55e03cb42254cbecc2c0c858a38432be696d3f2b827d387bb6c86eb"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:41:23 crc kubenswrapper[5133]: I1121 14:41:23.312783 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://142b6de6b55e03cb42254cbecc2c0c858a38432be696d3f2b827d387bb6c86eb" gracePeriod=600 Nov 21 14:41:24 crc kubenswrapper[5133]: I1121 14:41:24.249441 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="142b6de6b55e03cb42254cbecc2c0c858a38432be696d3f2b827d387bb6c86eb" exitCode=0 Nov 21 14:41:24 crc kubenswrapper[5133]: I1121 14:41:24.249531 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"142b6de6b55e03cb42254cbecc2c0c858a38432be696d3f2b827d387bb6c86eb"} Nov 21 14:41:24 crc kubenswrapper[5133]: I1121 14:41:24.249923 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459"} Nov 21 14:41:24 crc kubenswrapper[5133]: I1121 14:41:24.249951 5133 scope.go:117] "RemoveContainer" containerID="76b8364e4af45cbd19eaaf4b68ddf1c52013121c27d1586f3a2c762b2e050cef" Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.846615 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-66bjk"] Nov 21 14:41:56 crc kubenswrapper[5133]: 
E1121 14:41:56.847712 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerName="extract-content" Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.847729 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerName="extract-content" Nov 21 14:41:56 crc kubenswrapper[5133]: E1121 14:41:56.847769 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerName="registry-server" Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.847778 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerName="registry-server" Nov 21 14:41:56 crc kubenswrapper[5133]: E1121 14:41:56.847791 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerName="extract-utilities" Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.847798 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerName="extract-utilities" Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.848035 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="920b19be-7ba1-49e5-a986-f6c9ef724a12" containerName="registry-server" Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.849660 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.855907 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-66bjk"] Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.976328 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzx25\" (UniqueName: \"kubernetes.io/projected/bd4f4eef-d68e-4e77-ac48-801d7f79715c-kube-api-access-rzx25\") pod \"redhat-marketplace-66bjk\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.976396 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-catalog-content\") pod \"redhat-marketplace-66bjk\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:56 crc kubenswrapper[5133]: I1121 14:41:56.976581 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-utilities\") pod \"redhat-marketplace-66bjk\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:57 crc kubenswrapper[5133]: I1121 14:41:57.078488 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-catalog-content\") pod \"redhat-marketplace-66bjk\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:57 crc kubenswrapper[5133]: I1121 14:41:57.078636 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-utilities\") pod \"redhat-marketplace-66bjk\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:57 crc kubenswrapper[5133]: I1121 14:41:57.078704 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzx25\" (UniqueName: \"kubernetes.io/projected/bd4f4eef-d68e-4e77-ac48-801d7f79715c-kube-api-access-rzx25\") pod \"redhat-marketplace-66bjk\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:57 crc kubenswrapper[5133]: I1121 14:41:57.079122 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-catalog-content\") pod \"redhat-marketplace-66bjk\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:57 crc kubenswrapper[5133]: I1121 14:41:57.079272 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-utilities\") pod \"redhat-marketplace-66bjk\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:57 crc kubenswrapper[5133]: I1121 14:41:57.098422 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzx25\" (UniqueName: \"kubernetes.io/projected/bd4f4eef-d68e-4e77-ac48-801d7f79715c-kube-api-access-rzx25\") pod \"redhat-marketplace-66bjk\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:57 crc kubenswrapper[5133]: I1121 14:41:57.174217 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:41:57 crc kubenswrapper[5133]: I1121 14:41:57.673755 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-66bjk"] Nov 21 14:41:58 crc kubenswrapper[5133]: I1121 14:41:58.562904 5133 generic.go:334] "Generic (PLEG): container finished" podID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerID="72c408c36de2699e9a608ec06da01c0da7b5b2ce340091df489595fb98a726a7" exitCode=0 Nov 21 14:41:58 crc kubenswrapper[5133]: I1121 14:41:58.563019 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bjk" event={"ID":"bd4f4eef-d68e-4e77-ac48-801d7f79715c","Type":"ContainerDied","Data":"72c408c36de2699e9a608ec06da01c0da7b5b2ce340091df489595fb98a726a7"} Nov 21 14:41:58 crc kubenswrapper[5133]: I1121 14:41:58.563286 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bjk" event={"ID":"bd4f4eef-d68e-4e77-ac48-801d7f79715c","Type":"ContainerStarted","Data":"f783a70cf822fa7cc36e0f473f36014733c0a1ba05e4e6fca98c3506a2e9c699"} Nov 21 14:42:01 crc kubenswrapper[5133]: I1121 14:42:01.590024 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bjk" event={"ID":"bd4f4eef-d68e-4e77-ac48-801d7f79715c","Type":"ContainerDied","Data":"0df8d5b36dfa4f4ed301dac37a3b3d478e0e9dcea5f50f0fd40a5555d8d10853"} Nov 21 14:42:01 crc kubenswrapper[5133]: I1121 14:42:01.590020 5133 generic.go:334] "Generic (PLEG): container finished" podID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerID="0df8d5b36dfa4f4ed301dac37a3b3d478e0e9dcea5f50f0fd40a5555d8d10853" exitCode=0 Nov 21 14:42:03 crc kubenswrapper[5133]: I1121 14:42:03.611015 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bjk" event={"ID":"bd4f4eef-d68e-4e77-ac48-801d7f79715c","Type":"ContainerStarted","Data":"3675a30f9887180b74fccced679e6439731bb86aff775ddd678aa15c95d17095"} Nov 21 14:42:03 crc kubenswrapper[5133]: I1121 14:42:03.635108 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-66bjk" podStartSLOduration=3.224777271 podStartE2EDuration="7.635090763s" podCreationTimestamp="2025-11-21 14:41:56 +0000 UTC" firstStartedPulling="2025-11-21 14:41:58.565881412 +0000 UTC m=+3578.363713670" lastFinishedPulling="2025-11-21 14:42:02.976194914 +0000 UTC m=+3582.774027162" observedRunningTime="2025-11-21 14:42:03.628484157 +0000 UTC m=+3583.426316415" watchObservedRunningTime="2025-11-21 14:42:03.635090763 +0000 UTC m=+3583.432923011" Nov 21 14:42:07 crc kubenswrapper[5133]: I1121 14:42:07.175150 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:42:07 crc kubenswrapper[5133]: I1121 14:42:07.175426 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:42:07 crc kubenswrapper[5133]: I1121 14:42:07.244189 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:42:17 crc kubenswrapper[5133]: I1121 14:42:17.244602 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:42:17 crc kubenswrapper[5133]: I1121 14:42:17.294728 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-66bjk"] Nov 21 14:42:17 crc kubenswrapper[5133]: I1121 14:42:17.759372 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-66bjk" podUID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerName="registry-server" containerID="cri-o://3675a30f9887180b74fccced679e6439731bb86aff775ddd678aa15c95d17095" gracePeriod=2 Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.770837 5133 generic.go:334] "Generic (PLEG): container finished" podID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerID="3675a30f9887180b74fccced679e6439731bb86aff775ddd678aa15c95d17095" exitCode=0 Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.770884 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bjk" event={"ID":"bd4f4eef-d68e-4e77-ac48-801d7f79715c","Type":"ContainerDied","Data":"3675a30f9887180b74fccced679e6439731bb86aff775ddd678aa15c95d17095"} Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.771768 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bjk" event={"ID":"bd4f4eef-d68e-4e77-ac48-801d7f79715c","Type":"ContainerDied","Data":"f783a70cf822fa7cc36e0f473f36014733c0a1ba05e4e6fca98c3506a2e9c699"} Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.771795 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f783a70cf822fa7cc36e0f473f36014733c0a1ba05e4e6fca98c3506a2e9c699" Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.800780 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.895732 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-catalog-content\") pod \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.895835 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzx25\" (UniqueName: \"kubernetes.io/projected/bd4f4eef-d68e-4e77-ac48-801d7f79715c-kube-api-access-rzx25\") pod \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.895956 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-utilities\") pod \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\" (UID: \"bd4f4eef-d68e-4e77-ac48-801d7f79715c\") " Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.897416 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-utilities" (OuterVolumeSpecName: "utilities") pod "bd4f4eef-d68e-4e77-ac48-801d7f79715c" (UID: "bd4f4eef-d68e-4e77-ac48-801d7f79715c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.902416 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd4f4eef-d68e-4e77-ac48-801d7f79715c-kube-api-access-rzx25" (OuterVolumeSpecName: "kube-api-access-rzx25") pod "bd4f4eef-d68e-4e77-ac48-801d7f79715c" (UID: "bd4f4eef-d68e-4e77-ac48-801d7f79715c"). InnerVolumeSpecName "kube-api-access-rzx25". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.914165 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd4f4eef-d68e-4e77-ac48-801d7f79715c" (UID: "bd4f4eef-d68e-4e77-ac48-801d7f79715c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.997764 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.998159 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzx25\" (UniqueName: \"kubernetes.io/projected/bd4f4eef-d68e-4e77-ac48-801d7f79715c-kube-api-access-rzx25\") on node \"crc\" DevicePath \"\"" Nov 21 14:42:18 crc kubenswrapper[5133]: I1121 14:42:18.998248 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd4f4eef-d68e-4e77-ac48-801d7f79715c-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:42:19 crc kubenswrapper[5133]: I1121 14:42:19.782564 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-66bjk" Nov 21 14:42:19 crc kubenswrapper[5133]: I1121 14:42:19.836018 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-66bjk"] Nov 21 14:42:19 crc kubenswrapper[5133]: I1121 14:42:19.848424 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-66bjk"] Nov 21 14:42:20 crc kubenswrapper[5133]: I1121 14:42:20.468499 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" path="/var/lib/kubelet/pods/bd4f4eef-d68e-4e77-ac48-801d7f79715c/volumes" Nov 21 14:43:51 crc kubenswrapper[5133]: I1121 14:43:51.616241 5133 generic.go:334] "Generic (PLEG): container finished" podID="25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" containerID="d2cd926b83d94a0b4a165949f1f113f238c2030026c38d067099890b05f2feee" exitCode=0 Nov 21 14:43:51 crc kubenswrapper[5133]: I1121 14:43:51.616334 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" event={"ID":"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e","Type":"ContainerDied","Data":"d2cd926b83d94a0b4a165949f1f113f238c2030026c38d067099890b05f2feee"} Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.057473 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184137 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-1\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184195 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6r24h\" (UniqueName: \"kubernetes.io/projected/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-kube-api-access-6r24h\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184274 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-1\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184348 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-0\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184368 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-extra-config-0\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184386 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-0\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184444 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184492 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ssh-key\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184547 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph-nova-0\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184612 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-inventory\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.184634 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-custom-ceph-combined-ca-bundle\") pod \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\" (UID: \"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e\") " Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.190162 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-kube-api-access-6r24h" (OuterVolumeSpecName: "kube-api-access-6r24h") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "kube-api-access-6r24h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.191670 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph" (OuterVolumeSpecName: "ceph") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.204094 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-custom-ceph-combined-ca-bundle" (OuterVolumeSpecName: "nova-custom-ceph-combined-ca-bundle") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "nova-custom-ceph-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.211324 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.211783 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph-nova-0" (OuterVolumeSpecName: "ceph-nova-0") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "ceph-nova-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.214848 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "nova-extra-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.216449 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.223652 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.233738 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.234248 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.235296 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-inventory" (OuterVolumeSpecName: "inventory") pod "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" (UID: "25ee67bf-c176-4cdc-b2e8-1b36b72ff88e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286734 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286780 5133 reconciler_common.go:293] "Volume detached for volume \"ceph-nova-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph-nova-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286795 5133 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-inventory\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286809 5133 reconciler_common.go:293] "Volume detached for volume \"nova-custom-ceph-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-custom-ceph-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286822 5133 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286835 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6r24h\" (UniqueName: \"kubernetes.io/projected/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-kube-api-access-6r24h\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286846 5133 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286857 5133 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286867 5133 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286877 5133 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.286886 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/25ee67bf-c176-4cdc-b2e8-1b36b72ff88e-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.310555 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.310610 5133 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.635069 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" event={"ID":"25ee67bf-c176-4cdc-b2e8-1b36b72ff88e","Type":"ContainerDied","Data":"bb00d1c3dc36891d8fd71891cd32db3dc4a45824a484f7261eb225341fd617a3"} Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.635127 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bb00d1c3dc36891d8fd71891cd32db3dc4a45824a484f7261eb225341fd617a3" Nov 21 14:43:53 crc kubenswrapper[5133]: I1121 14:43:53.635192 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.579788 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 21 14:44:08 crc kubenswrapper[5133]: E1121 14:44:08.581155 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.581174 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 21 14:44:08 crc kubenswrapper[5133]: E1121 14:44:08.581192 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerName="registry-server" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.581198 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerName="registry-server" Nov 21 14:44:08 crc kubenswrapper[5133]: E1121 14:44:08.581213 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerName="extract-utilities" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.581220 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerName="extract-utilities" Nov 21 14:44:08 crc kubenswrapper[5133]: E1121 14:44:08.581228 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerName="extract-content" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.581233 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerName="extract-content" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.581411 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="25ee67bf-c176-4cdc-b2e8-1b36b72ff88e" containerName="nova-custom-ceph-edpm-deployment-openstack-edpm-ipam" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.581428 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd4f4eef-d68e-4e77-ac48-801d7f79715c" containerName="registry-server" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.582379 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.584841 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.584841 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.612565 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.665703 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"] Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.667320 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.669088 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695290 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695334 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695361 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-dev\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695379 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695399 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695483 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695507 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-nzthw\" (UniqueName: \"kubernetes.io/projected/3bf15e01-8975-4617-b353-163613da4bc5-kube-api-access-nzthw\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695542 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695566 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-run\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695587 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695678 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695735 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695823 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3bf15e01-8975-4617-b353-163613da4bc5-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695878 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-sys\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.695925 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.696098 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" 
(UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.739669 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797521 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3bf15e01-8975-4617-b353-163613da4bc5-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797562 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-sys\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797591 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797612 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-config-data-custom\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797628 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797648 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797671 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797690 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797705 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-scripts\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797724 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-config-data\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797740 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797761 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797779 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797795 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-dev\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797817 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-dev\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797833 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797850 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797869 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: 
I1121 14:44:08.797887 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-lib-modules\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797910 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzthw\" (UniqueName: \"kubernetes.io/projected/3bf15e01-8975-4617-b353-163613da4bc5-kube-api-access-nzthw\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797953 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-run\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797975 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.797995 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.798024 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.798043 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2v6d\" (UniqueName: \"kubernetes.io/projected/656b7d50-9049-415a-a4b1-08b531893110-kube-api-access-h2v6d\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.798066 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-run\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.798084 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-etc-nvme\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.798101 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: 
\"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.798118 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.798138 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.798153 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-sys\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.798179 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/656b7d50-9049-415a-a4b1-08b531893110-ceph\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.799122 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.799154 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-sys\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.800478 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.800688 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.800959 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 
14:44:08.801048 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-run\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.801135 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.801229 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.801265 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.805311 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/3bf15e01-8975-4617-b353-163613da4bc5-dev\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.810727 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.811872 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.816597 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.809961 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bf15e01-8975-4617-b353-163613da4bc5-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.820613 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/3bf15e01-8975-4617-b353-163613da4bc5-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " 
pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.829639 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzthw\" (UniqueName: \"kubernetes.io/projected/3bf15e01-8975-4617-b353-163613da4bc5-kube-api-access-nzthw\") pod \"cinder-volume-volume1-0\" (UID: \"3bf15e01-8975-4617-b353-163613da4bc5\") " pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.899816 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-run\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.899878 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.899896 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.899919 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2v6d\" (UniqueName: \"kubernetes.io/projected/656b7d50-9049-415a-a4b1-08b531893110-kube-api-access-h2v6d\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.899948 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-etc-nvme\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.899977 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-sys\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900021 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/656b7d50-9049-415a-a4b1-08b531893110-ceph\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900065 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-config-data-custom\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900082 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-var-lib-cinder\") pod \"cinder-backup-0\" (UID: 
\"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900106 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900131 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900144 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-scripts\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900158 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-config-data\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900179 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900205 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-dev\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900236 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-lib-modules\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900312 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-lib-modules\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900348 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-run\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900369 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-etc-iscsi\") pod \"cinder-backup-0\" (UID: 
\"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.900908 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.901244 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-etc-nvme\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.901277 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-sys\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.902899 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.903695 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.904967 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.905273 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.905277 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.905305 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/656b7d50-9049-415a-a4b1-08b531893110-dev\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.905647 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/656b7d50-9049-415a-a4b1-08b531893110-ceph\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.907737 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-scripts\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.910244 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-config-data\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.913234 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/656b7d50-9049-415a-a4b1-08b531893110-config-data-custom\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.925532 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2v6d\" (UniqueName: \"kubernetes.io/projected/656b7d50-9049-415a-a4b1-08b531893110-kube-api-access-h2v6d\") pod \"cinder-backup-0\" (UID: \"656b7d50-9049-415a-a4b1-08b531893110\") " pod="openstack/cinder-backup-0" Nov 21 14:44:08 crc kubenswrapper[5133]: I1121 14:44:08.992480 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.247067 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-2zdfp"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.249135 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.255271 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-2zdfp"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.317331 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-operator-scripts\") pod \"manila-db-create-2zdfp\" (UID: \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\") " pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.317438 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bf2k\" (UniqueName: \"kubernetes.io/projected/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-kube-api-access-4bf2k\") pod \"manila-db-create-2zdfp\" (UID: \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\") " pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.362910 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-88e9-account-create-kv6xr"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.364147 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.367274 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.379898 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-88e9-account-create-kv6xr"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.419391 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-operator-scripts\") pod \"manila-db-create-2zdfp\" (UID: \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\") " pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.419442 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28db6\" (UniqueName: \"kubernetes.io/projected/6dd226ff-efe4-4adb-b60f-68641e1226a8-kube-api-access-28db6\") pod \"manila-88e9-account-create-kv6xr\" (UID: \"6dd226ff-efe4-4adb-b60f-68641e1226a8\") " pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.419488 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dd226ff-efe4-4adb-b60f-68641e1226a8-operator-scripts\") pod \"manila-88e9-account-create-kv6xr\" (UID: \"6dd226ff-efe4-4adb-b60f-68641e1226a8\") " pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.419526 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4bf2k\" (UniqueName: \"kubernetes.io/projected/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-kube-api-access-4bf2k\") pod \"manila-db-create-2zdfp\" (UID: \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\") " pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.420769 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-operator-scripts\") pod \"manila-db-create-2zdfp\" (UID: \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\") " pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.437163 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.438604 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.441413 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.441647 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.441754 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cvwrt" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.441863 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.453958 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4bf2k\" (UniqueName: \"kubernetes.io/projected/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-kube-api-access-4bf2k\") pod \"manila-db-create-2zdfp\" (UID: \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\") " pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.459556 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.496486 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.522961 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.528467 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.530078 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.531669 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.531707 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28db6\" (UniqueName: \"kubernetes.io/projected/6dd226ff-efe4-4adb-b60f-68641e1226a8-kube-api-access-28db6\") pod \"manila-88e9-account-create-kv6xr\" (UID: \"6dd226ff-efe4-4adb-b60f-68641e1226a8\") " pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.531756 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.531817 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dd226ff-efe4-4adb-b60f-68641e1226a8-operator-scripts\") pod \"manila-88e9-account-create-kv6xr\" (UID: \"6dd226ff-efe4-4adb-b60f-68641e1226a8\") " pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.531870 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcqn2\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-kube-api-access-dcqn2\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.531926 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-logs\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.532050 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-config-data\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.532127 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-ceph\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.532150 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") 
pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.532176 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-scripts\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.533390 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dd226ff-efe4-4adb-b60f-68641e1226a8-operator-scripts\") pod \"manila-88e9-account-create-kv6xr\" (UID: \"6dd226ff-efe4-4adb-b60f-68641e1226a8\") " pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.536971 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.537200 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.541722 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.552392 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-85888d8669-n7vg6"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.553922 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.558086 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-hmrzf" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.558246 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.558349 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.561768 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.561955 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85888d8669-n7vg6"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.563126 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28db6\" (UniqueName: \"kubernetes.io/projected/6dd226ff-efe4-4adb-b60f-68641e1226a8-kube-api-access-28db6\") pod \"manila-88e9-account-create-kv6xr\" (UID: \"6dd226ff-efe4-4adb-b60f-68641e1226a8\") " pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.570074 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:09 crc kubenswrapper[5133]: E1121 14:44:09.570790 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance httpd-run kube-api-access-dcqn2 logs public-tls-certs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-external-api-0" 
podUID="a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.597257 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.627783 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-c44865549-jt2j7"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.629171 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636376 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636435 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636470 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636502 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636533 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636563 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcqn2\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-kube-api-access-dcqn2\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636589 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6chgb\" (UniqueName: \"kubernetes.io/projected/120ff136-ecbc-4f53-9338-d1435003710a-kube-api-access-6chgb\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636613 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-logs\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636650 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-logs\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636679 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/120ff136-ecbc-4f53-9338-d1435003710a-logs\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636702 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-config-data\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636728 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-config-data\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636756 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636778 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wwsk\" (UniqueName: \"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-kube-api-access-9wwsk\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636801 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-ceph\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636819 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636841 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-ceph\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636863 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-scripts\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636887 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-scripts\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636926 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636953 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.636971 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.637013 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/120ff136-ecbc-4f53-9338-d1435003710a-horizon-secret-key\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.643348 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.643835 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.644277 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-logs\") pod 
\"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.645491 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.680066 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.680991 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-config-data\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.681596 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-ceph\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.683794 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.699558 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-scripts\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.701758 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcqn2\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-kube-api-access-dcqn2\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.701821 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:09 crc kubenswrapper[5133]: E1121 14:44:09.704687 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceph combined-ca-bundle config-data glance httpd-run internal-tls-certs kube-api-access-9wwsk logs scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/glance-default-internal-api-0" podUID="0c78cf80-ceb7-4407-a970-32a803307e25" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.739609 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-logs\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " 
pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.739677 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/120ff136-ecbc-4f53-9338-d1435003710a-logs\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.739715 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-config-data\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.739761 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.739811 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wwsk\" (UniqueName: \"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-kube-api-access-9wwsk\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.739927 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-ceph\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.739966 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-scripts\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740030 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-scripts\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740062 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-config-data\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740097 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740127 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740153 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a343caf3-f248-4c28-8131-d074688735ad-logs\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740187 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/120ff136-ecbc-4f53-9338-d1435003710a-horizon-secret-key\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740250 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxllz\" (UniqueName: \"kubernetes.io/projected/a343caf3-f248-4c28-8131-d074688735ad-kube-api-access-qxllz\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740292 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740327 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740364 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740405 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a343caf3-f248-4c28-8131-d074688735ad-horizon-secret-key\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740435 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6chgb\" (UniqueName: \"kubernetes.io/projected/120ff136-ecbc-4f53-9338-d1435003710a-kube-api-access-6chgb\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.740778 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/120ff136-ecbc-4f53-9338-d1435003710a-logs\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.741152 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.741383 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.741689 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-logs\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.741867 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c44865549-jt2j7"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.744844 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.746026 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-scripts\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.746900 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.752303 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.756190 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-config-data\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.765965 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" 
(UniqueName: \"kubernetes.io/secret/120ff136-ecbc-4f53-9338-d1435003710a-horizon-secret-key\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.765988 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-scripts\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.770411 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-config-data\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.783895 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-ceph\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.789627 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wwsk\" (UniqueName: \"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-kube-api-access-9wwsk\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.808358 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.874970 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.875043 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"3bf15e01-8975-4617-b353-163613da4bc5","Type":"ContainerStarted","Data":"16425fe1711553067b99efd187e433737b9d494da20f53553ebdd31da945ec57"} Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.875174 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.895418 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"] Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.912357 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6chgb\" (UniqueName: \"kubernetes.io/projected/120ff136-ecbc-4f53-9338-d1435003710a-kube-api-access-6chgb\") pod \"horizon-85888d8669-n7vg6\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.928549 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.929710 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxllz\" (UniqueName: \"kubernetes.io/projected/a343caf3-f248-4c28-8131-d074688735ad-kube-api-access-qxllz\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.929786 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a343caf3-f248-4c28-8131-d074688735ad-horizon-secret-key\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.929908 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-scripts\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.929930 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-config-data\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.929964 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a343caf3-f248-4c28-8131-d074688735ad-logs\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.930365 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a343caf3-f248-4c28-8131-d074688735ad-logs\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.936777 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-scripts\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.937867 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-config-data\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: W1121 14:44:09.963023 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod656b7d50_9049_415a_a4b1_08b531893110.slice/crio-88a58787c0795dc8688fd76bce81a040708fa09b0819697694efaf114ea59ac1 WatchSource:0}: Error finding container 88a58787c0795dc8688fd76bce81a040708fa09b0819697694efaf114ea59ac1: Status 404 returned error can't find the container with id 88a58787c0795dc8688fd76bce81a040708fa09b0819697694efaf114ea59ac1 
Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.970536 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a343caf3-f248-4c28-8131-d074688735ad-horizon-secret-key\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:09 crc kubenswrapper[5133]: I1121 14:44:09.987317 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxllz\" (UniqueName: \"kubernetes.io/projected/a343caf3-f248-4c28-8131-d074688735ad-kube-api-access-qxllz\") pod \"horizon-c44865549-jt2j7\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.010912 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.110792 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.154796 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161619 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wwsk\" (UniqueName: \"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-kube-api-access-9wwsk\") pod \"0c78cf80-ceb7-4407-a970-32a803307e25\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161678 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-logs\") pod \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161705 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-public-tls-certs\") pod \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161735 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-ceph\") pod \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161751 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"0c78cf80-ceb7-4407-a970-32a803307e25\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161770 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-logs\") pod \"0c78cf80-ceb7-4407-a970-32a803307e25\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161787 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-combined-ca-bundle\") pod \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161834 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-config-data\") pod \"0c78cf80-ceb7-4407-a970-32a803307e25\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161855 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-ceph\") pod \"0c78cf80-ceb7-4407-a970-32a803307e25\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161875 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-scripts\") pod \"0c78cf80-ceb7-4407-a970-32a803307e25\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161896 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-scripts\") pod \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161923 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-internal-tls-certs\") pod \"0c78cf80-ceb7-4407-a970-32a803307e25\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161941 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcqn2\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-kube-api-access-dcqn2\") pod \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.161977 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-combined-ca-bundle\") pod \"0c78cf80-ceb7-4407-a970-32a803307e25\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.162028 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-config-data\") pod \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.162050 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-httpd-run\") pod \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.162064 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod 
\"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\" (UID: \"a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.162093 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-httpd-run\") pod \"0c78cf80-ceb7-4407-a970-32a803307e25\" (UID: \"0c78cf80-ceb7-4407-a970-32a803307e25\") " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.162594 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "0c78cf80-ceb7-4407-a970-32a803307e25" (UID: "0c78cf80-ceb7-4407-a970-32a803307e25"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.188856 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-scripts" (OuterVolumeSpecName: "scripts") pod "0c78cf80-ceb7-4407-a970-32a803307e25" (UID: "0c78cf80-ceb7-4407-a970-32a803307e25"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.190817 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" (UID: "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.191262 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-config-data" (OuterVolumeSpecName: "config-data") pod "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" (UID: "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.194030 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "0c78cf80-ceb7-4407-a970-32a803307e25" (UID: "0c78cf80-ceb7-4407-a970-32a803307e25"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.194119 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-scripts" (OuterVolumeSpecName: "scripts") pod "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" (UID: "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.194330 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-logs" (OuterVolumeSpecName: "logs") pod "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" (UID: "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.196856 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0c78cf80-ceb7-4407-a970-32a803307e25" (UID: "0c78cf80-ceb7-4407-a970-32a803307e25"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.197324 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" (UID: "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.199323 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-config-data" (OuterVolumeSpecName: "config-data") pod "0c78cf80-ceb7-4407-a970-32a803307e25" (UID: "0c78cf80-ceb7-4407-a970-32a803307e25"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.199809 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-kube-api-access-9wwsk" (OuterVolumeSpecName: "kube-api-access-9wwsk") pod "0c78cf80-ceb7-4407-a970-32a803307e25" (UID: "0c78cf80-ceb7-4407-a970-32a803307e25"). InnerVolumeSpecName "kube-api-access-9wwsk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.199891 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-ceph" (OuterVolumeSpecName: "ceph") pod "0c78cf80-ceb7-4407-a970-32a803307e25" (UID: "0c78cf80-ceb7-4407-a970-32a803307e25"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.201971 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" (UID: "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.202177 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-ceph" (OuterVolumeSpecName: "ceph") pod "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" (UID: "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.205232 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c78cf80-ceb7-4407-a970-32a803307e25" (UID: "0c78cf80-ceb7-4407-a970-32a803307e25"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.208448 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" (UID: "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.209155 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-logs" (OuterVolumeSpecName: "logs") pod "0c78cf80-ceb7-4407-a970-32a803307e25" (UID: "0c78cf80-ceb7-4407-a970-32a803307e25"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.213989 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-kube-api-access-dcqn2" (OuterVolumeSpecName: "kube-api-access-dcqn2") pod "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" (UID: "a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5"). InnerVolumeSpecName "kube-api-access-dcqn2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267641 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267673 5133 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267683 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267720 5133 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267731 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267741 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267749 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267759 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267767 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267775 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267783 5133 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.267791 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcqn2\" (UniqueName: \"kubernetes.io/projected/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-kube-api-access-dcqn2\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.273119 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c78cf80-ceb7-4407-a970-32a803307e25-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.273143 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.273155 5133 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.273209 5133 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.273223 5133 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0c78cf80-ceb7-4407-a970-32a803307e25-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.273233 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wwsk\" (UniqueName: \"kubernetes.io/projected/0c78cf80-ceb7-4407-a970-32a803307e25-kube-api-access-9wwsk\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.293381 5133 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.322183 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-2zdfp"] Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.380545 5133 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.380675 5133 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.482319 5133 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.629956 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-88e9-account-create-kv6xr"] Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.641379 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c44865549-jt2j7"] Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.772949 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85888d8669-n7vg6"] Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.883764 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-2zdfp" event={"ID":"0c2588b0-ad67-4806-a52c-7b0e7a7302c8","Type":"ContainerStarted","Data":"52739d85d415cfc0285cc8145a82c3ab74c0fc1060ff3b27b2fa8baab4851faf"} Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.884628 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.885082 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"656b7d50-9049-415a-a4b1-08b531893110","Type":"ContainerStarted","Data":"88a58787c0795dc8688fd76bce81a040708fa09b0819697694efaf114ea59ac1"} Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.885106 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.939933 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.961087 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.986555 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.988964 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.998240 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.998659 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.998899 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cvwrt" Nov 21 14:44:10 crc kubenswrapper[5133]: I1121 14:44:10.999153 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.022356 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.040924 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.049709 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.061251 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.063447 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.071491 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.072326 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.089523 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 21 14:44:11 crc kubenswrapper[5133]: W1121 14:44:11.118279 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod120ff136_ecbc_4f53_9338_d1435003710a.slice/crio-d0714f83e8b9cd2c56b79360582a73e54d154a266477b19465491378707b14b6 WatchSource:0}: Error finding container d0714f83e8b9cd2c56b79360582a73e54d154a266477b19465491378707b14b6: Status 404 returned error can't find the container with id d0714f83e8b9cd2c56b79360582a73e54d154a266477b19465491378707b14b6 Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.199621 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200350 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-scripts\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200442 5133 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-ceph\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200489 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200516 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200546 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-logs\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200588 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-ceph\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200626 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200652 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mszkm\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-kube-api-access-mszkm\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200680 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200717 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200744 
5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-logs\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200776 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200800 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200831 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200858 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-config-data\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200880 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.200938 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgmkf\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-kube-api-access-dgmkf\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302197 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302241 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-logs\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc 
kubenswrapper[5133]: I1121 14:44:11.302276 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302297 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302329 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302353 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-config-data\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302373 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302394 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgmkf\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-kube-api-access-dgmkf\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302430 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302470 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-scripts\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302517 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302525 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-ceph\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302544 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302566 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302597 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-logs\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302635 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-ceph\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302674 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302706 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mszkm\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-kube-api-access-mszkm\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302736 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.302797 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-logs\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.307916 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.390713 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.416094 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-logs\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.420599 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-config-data\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.444669 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.445636 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.451651 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.452970 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.467374 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-scripts\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.469505 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " 
pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.483335 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.484112 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.487319 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-ceph\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.487374 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-ceph\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.487361 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.488423 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.488702 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgmkf\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-kube-api-access-dgmkf\") pod \"glance-default-internal-api-0\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.490726 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mszkm\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-kube-api-access-mszkm\") pod \"glance-default-external-api-0\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.560223 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.619199 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.900597 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-88e9-account-create-kv6xr" event={"ID":"6dd226ff-efe4-4adb-b60f-68641e1226a8","Type":"ContainerStarted","Data":"221e263e71f739040ba9afb120f8129ce2ce8c1a5dd4841a7c6a8d0844e254e1"} Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.900642 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-88e9-account-create-kv6xr" event={"ID":"6dd226ff-efe4-4adb-b60f-68641e1226a8","Type":"ContainerStarted","Data":"70018f0190f37a00bb7d882ccca4d60f1549b25f9e6bdd65086d18aa94aacf29"} Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.906263 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85888d8669-n7vg6" event={"ID":"120ff136-ecbc-4f53-9338-d1435003710a","Type":"ContainerStarted","Data":"d0714f83e8b9cd2c56b79360582a73e54d154a266477b19465491378707b14b6"} Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.908489 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-2zdfp" event={"ID":"0c2588b0-ad67-4806-a52c-7b0e7a7302c8","Type":"ContainerStarted","Data":"c73fe7d91dbdae642d848f38668aae283ac9eeae0d618250d60f7b3f4508daa3"} Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.909404 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c44865549-jt2j7" event={"ID":"a343caf3-f248-4c28-8131-d074688735ad","Type":"ContainerStarted","Data":"24236545fa5456b80352cf6026cf34f0782baf823029a6c8078f92f5296bedf0"} Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.927705 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-88e9-account-create-kv6xr" podStartSLOduration=2.927684371 podStartE2EDuration="2.927684371s" podCreationTimestamp="2025-11-21 14:44:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:11.912928062 +0000 UTC m=+3711.710760310" watchObservedRunningTime="2025-11-21 14:44:11.927684371 +0000 UTC m=+3711.725516619" Nov 21 14:44:11 crc kubenswrapper[5133]: I1121 14:44:11.937737 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-create-2zdfp" podStartSLOduration=2.937718615 podStartE2EDuration="2.937718615s" podCreationTimestamp="2025-11-21 14:44:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:11.924330073 +0000 UTC m=+3711.722162321" watchObservedRunningTime="2025-11-21 14:44:11.937718615 +0000 UTC m=+3711.735550863" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.105652 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85888d8669-n7vg6"] Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.150321 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-fc96bd488-z7zf6"] Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.151896 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.168370 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.171726 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.190943 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-fc96bd488-z7zf6"] Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.267150 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c44865549-jt2j7"] Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.316488 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.331699 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6494f4b7cd-7bgbs"] Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.333754 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.339057 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-config-data\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.339152 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x49ng\" (UniqueName: \"kubernetes.io/projected/63a7b11c-9f04-4a20-a705-a8ae4fe98285-kube-api-access-x49ng\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.339209 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-combined-ca-bundle\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.339302 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a7b11c-9f04-4a20-a705-a8ae4fe98285-logs\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.339331 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-scripts\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.339362 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-secret-key\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " 
pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.339389 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-tls-certs\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.352019 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.360522 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6494f4b7cd-7bgbs"] Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443035 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-scripts\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443122 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-horizon-secret-key\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443160 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6b4dt\" (UniqueName: \"kubernetes.io/projected/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-kube-api-access-6b4dt\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443183 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-config-data\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443213 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a7b11c-9f04-4a20-a705-a8ae4fe98285-logs\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443233 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-scripts\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443254 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-secret-key\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443278 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-tls-certs\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443308 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-config-data\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443328 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-horizon-tls-certs\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443351 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-combined-ca-bundle\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443382 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-logs\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443401 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x49ng\" (UniqueName: \"kubernetes.io/projected/63a7b11c-9f04-4a20-a705-a8ae4fe98285-kube-api-access-x49ng\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.443438 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-combined-ca-bundle\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.445218 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a7b11c-9f04-4a20-a705-a8ae4fe98285-logs\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.446640 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-config-data\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.449706 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-scripts\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.454765 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-tls-certs\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.461303 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-combined-ca-bundle\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.473541 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-secret-key\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.484780 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x49ng\" (UniqueName: \"kubernetes.io/projected/63a7b11c-9f04-4a20-a705-a8ae4fe98285-kube-api-access-x49ng\") pod \"horizon-fc96bd488-z7zf6\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.492220 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c78cf80-ceb7-4407-a970-32a803307e25" path="/var/lib/kubelet/pods/0c78cf80-ceb7-4407-a970-32a803307e25/volumes" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.492709 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5" path="/var/lib/kubelet/pods/a414e6e4-3a74-43b5-9b6a-3e5c8a8f67c5/volumes" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.494505 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.548106 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-horizon-secret-key\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.548198 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6b4dt\" (UniqueName: \"kubernetes.io/projected/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-kube-api-access-6b4dt\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.548225 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-config-data\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.548295 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-horizon-tls-certs\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.548320 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-combined-ca-bundle\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.548351 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-logs\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.548387 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-scripts\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.549115 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-scripts\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.561704 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-config-data\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.567769 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-logs\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.570097 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-horizon-secret-key\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.579612 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-horizon-tls-certs\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.589958 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-combined-ca-bundle\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.615650 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6b4dt\" (UniqueName: \"kubernetes.io/projected/ce180175-f40a-48ea-bf15-1c8bfa8ff9aa-kube-api-access-6b4dt\") pod \"horizon-6494f4b7cd-7bgbs\" (UID: \"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa\") " pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.668489 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:12 crc kubenswrapper[5133]: I1121 14:44:12.898046 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:13 crc kubenswrapper[5133]: I1121 14:44:13.003515 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1908b0b1-dafd-46cb-ae0b-2c86778035f3","Type":"ContainerStarted","Data":"2c1caa8ad135cf9ad1dcdeb0d31f290e2f824f42cc2a33e012cb44ac7561da2b"} Nov 21 14:44:13 crc kubenswrapper[5133]: I1121 14:44:13.008057 5133 generic.go:334] "Generic (PLEG): container finished" podID="0c2588b0-ad67-4806-a52c-7b0e7a7302c8" containerID="c73fe7d91dbdae642d848f38668aae283ac9eeae0d618250d60f7b3f4508daa3" exitCode=0 Nov 21 14:44:13 crc kubenswrapper[5133]: I1121 14:44:13.008458 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-2zdfp" event={"ID":"0c2588b0-ad67-4806-a52c-7b0e7a7302c8","Type":"ContainerDied","Data":"c73fe7d91dbdae642d848f38668aae283ac9eeae0d618250d60f7b3f4508daa3"} Nov 21 14:44:13 crc kubenswrapper[5133]: I1121 14:44:13.011616 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee189c5e-52ad-4eaf-8415-df2b7f696a0a","Type":"ContainerStarted","Data":"91ea51424f624d3b72b8a61445968eb1320a69013f5d9fee5adbe03cf67d7e93"} Nov 21 14:44:13 crc kubenswrapper[5133]: I1121 14:44:13.035565 5133 generic.go:334] "Generic (PLEG): container finished" podID="6dd226ff-efe4-4adb-b60f-68641e1226a8" containerID="221e263e71f739040ba9afb120f8129ce2ce8c1a5dd4841a7c6a8d0844e254e1" exitCode=0 Nov 21 14:44:13 crc kubenswrapper[5133]: I1121 14:44:13.035620 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-88e9-account-create-kv6xr" event={"ID":"6dd226ff-efe4-4adb-b60f-68641e1226a8","Type":"ContainerDied","Data":"221e263e71f739040ba9afb120f8129ce2ce8c1a5dd4841a7c6a8d0844e254e1"} Nov 21 14:44:13 crc kubenswrapper[5133]: I1121 14:44:13.230110 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-fc96bd488-z7zf6"] Nov 21 14:44:13 crc kubenswrapper[5133]: W1121 14:44:13.297708 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63a7b11c_9f04_4a20_a705_a8ae4fe98285.slice/crio-7b6ed096b04e4b5029c5bc6b9d0edb0469fc557944e4a48e090452e6233e2741 WatchSource:0}: Error finding container 7b6ed096b04e4b5029c5bc6b9d0edb0469fc557944e4a48e090452e6233e2741: Status 404 returned error can't find the container with id 7b6ed096b04e4b5029c5bc6b9d0edb0469fc557944e4a48e090452e6233e2741 Nov 21 14:44:13 crc kubenswrapper[5133]: I1121 14:44:13.339681 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6494f4b7cd-7bgbs"] Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.052044 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1908b0b1-dafd-46cb-ae0b-2c86778035f3","Type":"ContainerStarted","Data":"546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3"} Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.055548 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"656b7d50-9049-415a-a4b1-08b531893110","Type":"ContainerStarted","Data":"5466cec74d8c6457ba743ae48220db3cbdc68639155a166d28ffa3f79c92ad3e"} Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.059103 5133 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee189c5e-52ad-4eaf-8415-df2b7f696a0a","Type":"ContainerStarted","Data":"38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5"} Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.061488 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fc96bd488-z7zf6" event={"ID":"63a7b11c-9f04-4a20-a705-a8ae4fe98285","Type":"ContainerStarted","Data":"7b6ed096b04e4b5029c5bc6b9d0edb0469fc557944e4a48e090452e6233e2741"} Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.063194 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6494f4b7cd-7bgbs" event={"ID":"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa","Type":"ContainerStarted","Data":"f9977b7769649381bbf3bb6eff75f386094a3b06a440668569ddf00b42d0164d"} Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.066347 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"3bf15e01-8975-4617-b353-163613da4bc5","Type":"ContainerStarted","Data":"6137efce3af68d874e0e5e04ef7a04c463d53b719a0d5e15cc4c152ed7bba220"} Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.067874 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"3bf15e01-8975-4617-b353-163613da4bc5","Type":"ContainerStarted","Data":"bbfb7a5be3bc20a89d4262d8f7d58651cd93cf077882697fec7d250deb1ae651"} Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.096864 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=3.13167091 podStartE2EDuration="6.096846815s" podCreationTimestamp="2025-11-21 14:44:08 +0000 UTC" firstStartedPulling="2025-11-21 14:44:09.496622076 +0000 UTC m=+3709.294454324" lastFinishedPulling="2025-11-21 14:44:12.461797981 +0000 UTC m=+3712.259630229" observedRunningTime="2025-11-21 14:44:14.092013568 +0000 UTC m=+3713.889845816" watchObservedRunningTime="2025-11-21 14:44:14.096846815 +0000 UTC m=+3713.894679063" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.663183 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.710279 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.830658 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28db6\" (UniqueName: \"kubernetes.io/projected/6dd226ff-efe4-4adb-b60f-68641e1226a8-kube-api-access-28db6\") pod \"6dd226ff-efe4-4adb-b60f-68641e1226a8\" (UID: \"6dd226ff-efe4-4adb-b60f-68641e1226a8\") " Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.830978 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dd226ff-efe4-4adb-b60f-68641e1226a8-operator-scripts\") pod \"6dd226ff-efe4-4adb-b60f-68641e1226a8\" (UID: \"6dd226ff-efe4-4adb-b60f-68641e1226a8\") " Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.831026 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-operator-scripts\") pod \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\" (UID: \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\") " Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.831132 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4bf2k\" (UniqueName: \"kubernetes.io/projected/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-kube-api-access-4bf2k\") pod \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\" (UID: \"0c2588b0-ad67-4806-a52c-7b0e7a7302c8\") " Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.831589 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6dd226ff-efe4-4adb-b60f-68641e1226a8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6dd226ff-efe4-4adb-b60f-68641e1226a8" (UID: "6dd226ff-efe4-4adb-b60f-68641e1226a8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.831625 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0c2588b0-ad67-4806-a52c-7b0e7a7302c8" (UID: "0c2588b0-ad67-4806-a52c-7b0e7a7302c8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.836579 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-kube-api-access-4bf2k" (OuterVolumeSpecName: "kube-api-access-4bf2k") pod "0c2588b0-ad67-4806-a52c-7b0e7a7302c8" (UID: "0c2588b0-ad67-4806-a52c-7b0e7a7302c8"). InnerVolumeSpecName "kube-api-access-4bf2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.849798 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dd226ff-efe4-4adb-b60f-68641e1226a8-kube-api-access-28db6" (OuterVolumeSpecName: "kube-api-access-28db6") pod "6dd226ff-efe4-4adb-b60f-68641e1226a8" (UID: "6dd226ff-efe4-4adb-b60f-68641e1226a8"). InnerVolumeSpecName "kube-api-access-28db6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.934291 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4bf2k\" (UniqueName: \"kubernetes.io/projected/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-kube-api-access-4bf2k\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.934334 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28db6\" (UniqueName: \"kubernetes.io/projected/6dd226ff-efe4-4adb-b60f-68641e1226a8-kube-api-access-28db6\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.934351 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6dd226ff-efe4-4adb-b60f-68641e1226a8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:14 crc kubenswrapper[5133]: I1121 14:44:14.934363 5133 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0c2588b0-ad67-4806-a52c-7b0e7a7302c8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.083381 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-2zdfp" event={"ID":"0c2588b0-ad67-4806-a52c-7b0e7a7302c8","Type":"ContainerDied","Data":"52739d85d415cfc0285cc8145a82c3ab74c0fc1060ff3b27b2fa8baab4851faf"} Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.083416 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52739d85d415cfc0285cc8145a82c3ab74c0fc1060ff3b27b2fa8baab4851faf" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.083466 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-2zdfp" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.088152 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"656b7d50-9049-415a-a4b1-08b531893110","Type":"ContainerStarted","Data":"0a019707334bf32e253e17a1a76964887e1874de6928badcd95af4ec8b4965ae"} Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.093911 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee189c5e-52ad-4eaf-8415-df2b7f696a0a","Type":"ContainerStarted","Data":"576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b"} Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.094059 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerName="glance-log" containerID="cri-o://38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5" gracePeriod=30 Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.096263 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerName="glance-httpd" containerID="cri-o://576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b" gracePeriod=30 Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.097020 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-88e9-account-create-kv6xr" event={"ID":"6dd226ff-efe4-4adb-b60f-68641e1226a8","Type":"ContainerDied","Data":"70018f0190f37a00bb7d882ccca4d60f1549b25f9e6bdd65086d18aa94aacf29"} Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.097047 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-88e9-account-create-kv6xr" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.097057 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70018f0190f37a00bb7d882ccca4d60f1549b25f9e6bdd65086d18aa94aacf29" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.100395 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1908b0b1-dafd-46cb-ae0b-2c86778035f3","Type":"ContainerStarted","Data":"152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b"} Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.100566 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerName="glance-log" containerID="cri-o://546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3" gracePeriod=30 Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.100598 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerName="glance-httpd" containerID="cri-o://152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b" gracePeriod=30 Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.127298 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=3.823385323 podStartE2EDuration="7.127277091s" podCreationTimestamp="2025-11-21 14:44:08 +0000 UTC" firstStartedPulling="2025-11-21 14:44:10.088338314 +0000 UTC m=+3709.886170562" lastFinishedPulling="2025-11-21 14:44:13.392230082 +0000 UTC m=+3713.190062330" observedRunningTime="2025-11-21 14:44:15.110380966 +0000 UTC m=+3714.908213214" watchObservedRunningTime="2025-11-21 14:44:15.127277091 +0000 UTC m=+3714.925109339" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.153575 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.153551203 podStartE2EDuration="5.153551203s" podCreationTimestamp="2025-11-21 14:44:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:15.139327259 +0000 UTC m=+3714.937159517" watchObservedRunningTime="2025-11-21 14:44:15.153551203 +0000 UTC m=+3714.951383451" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.190291 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.190268301 podStartE2EDuration="5.190268301s" podCreationTimestamp="2025-11-21 14:44:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:15.167295575 +0000 UTC m=+3714.965127823" watchObservedRunningTime="2025-11-21 14:44:15.190268301 +0000 UTC m=+3714.988100549" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.709377 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.793487 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.856466 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-config-data\") pod \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.856530 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-httpd-run\") pod \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.856609 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.856646 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dgmkf\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-kube-api-access-dgmkf\") pod \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.856741 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-internal-tls-certs\") pod \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.856779 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-logs\") pod \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.856829 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-combined-ca-bundle\") pod \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.856897 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-scripts\") pod \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.857013 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-ceph\") pod \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\" (UID: \"ee189c5e-52ad-4eaf-8415-df2b7f696a0a\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.858411 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ee189c5e-52ad-4eaf-8415-df2b7f696a0a" (UID: "ee189c5e-52ad-4eaf-8415-df2b7f696a0a"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.858755 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-logs" (OuterVolumeSpecName: "logs") pod "ee189c5e-52ad-4eaf-8415-df2b7f696a0a" (UID: "ee189c5e-52ad-4eaf-8415-df2b7f696a0a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.871112 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "ee189c5e-52ad-4eaf-8415-df2b7f696a0a" (UID: "ee189c5e-52ad-4eaf-8415-df2b7f696a0a"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.878422 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-ceph" (OuterVolumeSpecName: "ceph") pod "ee189c5e-52ad-4eaf-8415-df2b7f696a0a" (UID: "ee189c5e-52ad-4eaf-8415-df2b7f696a0a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.880137 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-scripts" (OuterVolumeSpecName: "scripts") pod "ee189c5e-52ad-4eaf-8415-df2b7f696a0a" (UID: "ee189c5e-52ad-4eaf-8415-df2b7f696a0a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.883163 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-kube-api-access-dgmkf" (OuterVolumeSpecName: "kube-api-access-dgmkf") pod "ee189c5e-52ad-4eaf-8415-df2b7f696a0a" (UID: "ee189c5e-52ad-4eaf-8415-df2b7f696a0a"). InnerVolumeSpecName "kube-api-access-dgmkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.912967 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee189c5e-52ad-4eaf-8415-df2b7f696a0a" (UID: "ee189c5e-52ad-4eaf-8415-df2b7f696a0a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.922539 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-config-data" (OuterVolumeSpecName: "config-data") pod "ee189c5e-52ad-4eaf-8415-df2b7f696a0a" (UID: "ee189c5e-52ad-4eaf-8415-df2b7f696a0a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.947266 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ee189c5e-52ad-4eaf-8415-df2b7f696a0a" (UID: "ee189c5e-52ad-4eaf-8415-df2b7f696a0a"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.958869 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-scripts\") pod \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.958949 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mszkm\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-kube-api-access-mszkm\") pod \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.959044 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-logs\") pod \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.959085 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-config-data\") pod \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.959109 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-ceph\") pod \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.959177 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.959606 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-logs" (OuterVolumeSpecName: "logs") pod "1908b0b1-dafd-46cb-ae0b-2c86778035f3" (UID: "1908b0b1-dafd-46cb-ae0b-2c86778035f3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960142 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-combined-ca-bundle\") pod \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960173 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-httpd-run\") pod \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960206 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-public-tls-certs\") pod \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\" (UID: \"1908b0b1-dafd-46cb-ae0b-2c86778035f3\") " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960619 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960630 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960639 5133 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960656 5133 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960666 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dgmkf\" (UniqueName: \"kubernetes.io/projected/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-kube-api-access-dgmkf\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960675 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960683 5133 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960690 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960698 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.960706 5133 reconciler_common.go:293] 
"Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee189c5e-52ad-4eaf-8415-df2b7f696a0a-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.961497 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1908b0b1-dafd-46cb-ae0b-2c86778035f3" (UID: "1908b0b1-dafd-46cb-ae0b-2c86778035f3"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.968197 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "1908b0b1-dafd-46cb-ae0b-2c86778035f3" (UID: "1908b0b1-dafd-46cb-ae0b-2c86778035f3"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.986702 5133 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.986815 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-scripts" (OuterVolumeSpecName: "scripts") pod "1908b0b1-dafd-46cb-ae0b-2c86778035f3" (UID: "1908b0b1-dafd-46cb-ae0b-2c86778035f3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.988692 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-kube-api-access-mszkm" (OuterVolumeSpecName: "kube-api-access-mszkm") pod "1908b0b1-dafd-46cb-ae0b-2c86778035f3" (UID: "1908b0b1-dafd-46cb-ae0b-2c86778035f3"). InnerVolumeSpecName "kube-api-access-mszkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:15 crc kubenswrapper[5133]: I1121 14:44:15.988920 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-ceph" (OuterVolumeSpecName: "ceph") pod "1908b0b1-dafd-46cb-ae0b-2c86778035f3" (UID: "1908b0b1-dafd-46cb-ae0b-2c86778035f3"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.017719 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1908b0b1-dafd-46cb-ae0b-2c86778035f3" (UID: "1908b0b1-dafd-46cb-ae0b-2c86778035f3"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.021239 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1908b0b1-dafd-46cb-ae0b-2c86778035f3" (UID: "1908b0b1-dafd-46cb-ae0b-2c86778035f3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.028133 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-config-data" (OuterVolumeSpecName: "config-data") pod "1908b0b1-dafd-46cb-ae0b-2c86778035f3" (UID: "1908b0b1-dafd-46cb-ae0b-2c86778035f3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.061958 5133 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.061989 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.062021 5133 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1908b0b1-dafd-46cb-ae0b-2c86778035f3-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.062033 5133 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.062045 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.062057 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mszkm\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-kube-api-access-mszkm\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.062067 5133 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.062076 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1908b0b1-dafd-46cb-ae0b-2c86778035f3-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.062085 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1908b0b1-dafd-46cb-ae0b-2c86778035f3-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.085953 5133 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.115292 5133 generic.go:334] "Generic (PLEG): container finished" podID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerID="576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b" exitCode=143 Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.115328 5133 generic.go:334] "Generic (PLEG): container finished" podID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerID="38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5" 
exitCode=143 Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.115336 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee189c5e-52ad-4eaf-8415-df2b7f696a0a","Type":"ContainerDied","Data":"576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b"} Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.115384 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee189c5e-52ad-4eaf-8415-df2b7f696a0a","Type":"ContainerDied","Data":"38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5"} Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.115384 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.115395 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee189c5e-52ad-4eaf-8415-df2b7f696a0a","Type":"ContainerDied","Data":"91ea51424f624d3b72b8a61445968eb1320a69013f5d9fee5adbe03cf67d7e93"} Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.115418 5133 scope.go:117] "RemoveContainer" containerID="576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.120323 5133 generic.go:334] "Generic (PLEG): container finished" podID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerID="152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b" exitCode=0 Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.120354 5133 generic.go:334] "Generic (PLEG): container finished" podID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerID="546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3" exitCode=143 Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.120369 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.120430 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1908b0b1-dafd-46cb-ae0b-2c86778035f3","Type":"ContainerDied","Data":"152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b"} Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.120458 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1908b0b1-dafd-46cb-ae0b-2c86778035f3","Type":"ContainerDied","Data":"546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3"} Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.120472 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1908b0b1-dafd-46cb-ae0b-2c86778035f3","Type":"ContainerDied","Data":"2c1caa8ad135cf9ad1dcdeb0d31f290e2f824f42cc2a33e012cb44ac7561da2b"} Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.146610 5133 scope.go:117] "RemoveContainer" containerID="38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.179340 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.168467 5133 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.199379 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.210695 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.222075 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.228070 5133 scope.go:117] "RemoveContainer" containerID="576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b" Nov 21 14:44:16 crc kubenswrapper[5133]: E1121 14:44:16.229703 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b\": container with ID starting with 576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b not found: ID does not exist" containerID="576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.229737 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b"} err="failed to get container status \"576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b\": rpc error: code = NotFound desc = could not find container \"576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b\": container with ID starting with 576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b not found: ID does not exist" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.229759 5133 scope.go:117] "RemoveContainer" containerID="38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5" Nov 21 14:44:16 crc kubenswrapper[5133]: 
E1121 14:44:16.230204 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5\": container with ID starting with 38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5 not found: ID does not exist" containerID="38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.230236 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5"} err="failed to get container status \"38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5\": rpc error: code = NotFound desc = could not find container \"38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5\": container with ID starting with 38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5 not found: ID does not exist" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.230256 5133 scope.go:117] "RemoveContainer" containerID="576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.230598 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b"} err="failed to get container status \"576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b\": rpc error: code = NotFound desc = could not find container \"576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b\": container with ID starting with 576f64b117398f008d0672a86fe02d406ca780dfa5228416bdea1ddb4613a04b not found: ID does not exist" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.230618 5133 scope.go:117] "RemoveContainer" containerID="38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.230882 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5"} err="failed to get container status \"38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5\": rpc error: code = NotFound desc = could not find container \"38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5\": container with ID starting with 38ab75861e650a9738c649d71adb68ad6e9326811a395e570f05583fe015faa5 not found: ID does not exist" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.230898 5133 scope.go:117] "RemoveContainer" containerID="152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.245404 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:16 crc kubenswrapper[5133]: E1121 14:44:16.245858 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerName="glance-log" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.245881 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerName="glance-log" Nov 21 14:44:16 crc kubenswrapper[5133]: E1121 14:44:16.245894 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerName="glance-httpd" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.245901 
5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerName="glance-httpd" Nov 21 14:44:16 crc kubenswrapper[5133]: E1121 14:44:16.245925 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dd226ff-efe4-4adb-b60f-68641e1226a8" containerName="mariadb-account-create" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.245934 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dd226ff-efe4-4adb-b60f-68641e1226a8" containerName="mariadb-account-create" Nov 21 14:44:16 crc kubenswrapper[5133]: E1121 14:44:16.245954 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerName="glance-httpd" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.245962 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerName="glance-httpd" Nov 21 14:44:16 crc kubenswrapper[5133]: E1121 14:44:16.245975 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c2588b0-ad67-4806-a52c-7b0e7a7302c8" containerName="mariadb-database-create" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.245980 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c2588b0-ad67-4806-a52c-7b0e7a7302c8" containerName="mariadb-database-create" Nov 21 14:44:16 crc kubenswrapper[5133]: E1121 14:44:16.246035 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerName="glance-log" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.246046 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerName="glance-log" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.246847 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerName="glance-log" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.246873 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerName="glance-httpd" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.246890 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" containerName="glance-httpd" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.246917 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" containerName="glance-log" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.246932 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dd226ff-efe4-4adb-b60f-68641e1226a8" containerName="mariadb-account-create" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.246943 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c2588b0-ad67-4806-a52c-7b0e7a7302c8" containerName="mariadb-database-create" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.247992 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.249952 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.250293 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.256274 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.260211 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.262898 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cvwrt" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.269949 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.290659 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.291268 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.296419 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.296623 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.314656 5133 scope.go:117] "RemoveContainer" containerID="546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.361709 5133 scope.go:117] "RemoveContainer" containerID="152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b" Nov 21 14:44:16 crc kubenswrapper[5133]: E1121 14:44:16.362252 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b\": container with ID starting with 152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b not found: ID does not exist" containerID="152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.362287 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b"} err="failed to get container status \"152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b\": rpc error: code = NotFound desc = could not find container \"152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b\": container with ID starting with 152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b not found: ID does not exist" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.362314 5133 scope.go:117] "RemoveContainer" containerID="546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3" Nov 21 14:44:16 crc kubenswrapper[5133]: E1121 14:44:16.362764 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3\": container with ID starting with 546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3 not found: ID does not exist" containerID="546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.362789 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3"} err="failed to get container status \"546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3\": rpc error: code = NotFound desc = could not find container \"546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3\": container with ID starting with 546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3 not found: ID does not exist" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.362807 5133 scope.go:117] "RemoveContainer" containerID="152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.363238 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b"} err="failed to get container status \"152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b\": rpc error: code = NotFound desc = could not find container \"152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b\": container with ID starting with 152011da734cb97dba7e32fd47dce61c1f9d9e95bcea7a0b17e4e370c16e5f7b not found: ID does not exist" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.363260 5133 scope.go:117] "RemoveContainer" containerID="546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.363499 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3"} err="failed to get container status \"546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3\": rpc error: code = NotFound desc = could not find container \"546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3\": container with ID starting with 546f68823718c4bb671ba94fec1382fd9def50bb3ca511c98b85929c872db9e3 not found: ID does not exist" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393540 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-config-data\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393638 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc682d32-5242-41ad-8040-1b88a4ed2534-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393665 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: 
\"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393744 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bc682d32-5242-41ad-8040-1b88a4ed2534-ceph\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393783 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393801 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393866 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393887 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-ceph\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393901 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393928 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4wdn\" (UniqueName: \"kubernetes.io/projected/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-kube-api-access-r4wdn\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.393957 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.394049 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-scripts\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.394088 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjfbw\" (UniqueName: \"kubernetes.io/projected/bc682d32-5242-41ad-8040-1b88a4ed2534-kube-api-access-xjfbw\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.394109 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc682d32-5242-41ad-8040-1b88a4ed2534-logs\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.394126 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.394148 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.394304 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-logs\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.394341 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.474250 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1908b0b1-dafd-46cb-ae0b-2c86778035f3" path="/var/lib/kubelet/pods/1908b0b1-dafd-46cb-ae0b-2c86778035f3/volumes" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.474984 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee189c5e-52ad-4eaf-8415-df2b7f696a0a" path="/var/lib/kubelet/pods/ee189c5e-52ad-4eaf-8415-df2b7f696a0a/volumes" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.495914 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc682d32-5242-41ad-8040-1b88a4ed2534-logs\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: 
I1121 14:44:16.495959 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.495981 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496029 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-logs\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496048 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496075 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-config-data\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496093 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc682d32-5242-41ad-8040-1b88a4ed2534-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496115 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496149 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bc682d32-5242-41ad-8040-1b88a4ed2534-ceph\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496166 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496180 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496218 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496236 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-ceph\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496251 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496279 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4wdn\" (UniqueName: \"kubernetes.io/projected/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-kube-api-access-r4wdn\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496306 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496329 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-scripts\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496364 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjfbw\" (UniqueName: \"kubernetes.io/projected/bc682d32-5242-41ad-8040-1b88a4ed2534-kube-api-access-xjfbw\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496530 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-logs\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.496876 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" 
(UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.497448 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc682d32-5242-41ad-8040-1b88a4ed2534-logs\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.499022 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.499795 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.501241 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc682d32-5242-41ad-8040-1b88a4ed2534-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.506236 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-config-data\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.508507 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-scripts\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.509330 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.509570 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/bc682d32-5242-41ad-8040-1b88a4ed2534-ceph\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.509827 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc 
kubenswrapper[5133]: I1121 14:44:16.510020 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-ceph\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.511193 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-scripts\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.512813 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.514182 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4wdn\" (UniqueName: \"kubernetes.io/projected/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-kube-api-access-r4wdn\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.517477 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73ddb6a4-cb5d-4c55-925b-3a2047cc35d1-config-data\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.527842 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc682d32-5242-41ad-8040-1b88a4ed2534-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.529761 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjfbw\" (UniqueName: \"kubernetes.io/projected/bc682d32-5242-41ad-8040-1b88a4ed2534-kube-api-access-xjfbw\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.534945 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1\") " pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.560332 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bc682d32-5242-41ad-8040-1b88a4ed2534\") " pod="openstack/glance-default-external-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.576682 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:16 crc kubenswrapper[5133]: I1121 14:44:16.615908 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 21 14:44:17 crc kubenswrapper[5133]: I1121 14:44:17.145653 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 21 14:44:17 crc kubenswrapper[5133]: I1121 14:44:17.244496 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 21 14:44:17 crc kubenswrapper[5133]: W1121 14:44:17.265649 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc682d32_5242_41ad_8040_1b88a4ed2534.slice/crio-4b64f7da717fb6d31668b44de29f9a94ff74e5a0c2d1e468485c53e1a2a50ea5 WatchSource:0}: Error finding container 4b64f7da717fb6d31668b44de29f9a94ff74e5a0c2d1e468485c53e1a2a50ea5: Status 404 returned error can't find the container with id 4b64f7da717fb6d31668b44de29f9a94ff74e5a0c2d1e468485c53e1a2a50ea5 Nov 21 14:44:18 crc kubenswrapper[5133]: I1121 14:44:18.157697 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1","Type":"ContainerStarted","Data":"160d9b1f52cd8751a364de94af74b12a6f9bf916ba1e862e04aa9f9be12e6f25"} Nov 21 14:44:18 crc kubenswrapper[5133]: I1121 14:44:18.158176 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1","Type":"ContainerStarted","Data":"ea081cb1d161eb26502703dc451d4ff6862d14a203292534df70ba2abc3db63b"} Nov 21 14:44:18 crc kubenswrapper[5133]: I1121 14:44:18.159495 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc682d32-5242-41ad-8040-1b88a4ed2534","Type":"ContainerStarted","Data":"4b64f7da717fb6d31668b44de29f9a94ff74e5a0c2d1e468485c53e1a2a50ea5"} Nov 21 14:44:18 crc kubenswrapper[5133]: I1121 14:44:18.905312 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:18 crc kubenswrapper[5133]: I1121 14:44:18.994056 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.110519 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.239185 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.711324 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-k4xmx"] Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.712581 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.716474 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.716618 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-5zdnm" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.736566 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-k4xmx"] Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.769916 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws7d5\" (UniqueName: \"kubernetes.io/projected/4126fdfa-1d4a-4276-951e-c1a26a3492dd-kube-api-access-ws7d5\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.769964 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-config-data\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.770061 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-job-config-data\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.770105 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-combined-ca-bundle\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.876073 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-job-config-data\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.876159 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-combined-ca-bundle\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.876300 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws7d5\" (UniqueName: \"kubernetes.io/projected/4126fdfa-1d4a-4276-951e-c1a26a3492dd-kube-api-access-ws7d5\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.876331 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-config-data\") pod \"manila-db-sync-k4xmx\" (UID: 
\"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.883090 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-config-data\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.883935 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-job-config-data\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.885969 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-combined-ca-bundle\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:19 crc kubenswrapper[5133]: I1121 14:44:19.891736 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws7d5\" (UniqueName: \"kubernetes.io/projected/4126fdfa-1d4a-4276-951e-c1a26a3492dd-kube-api-access-ws7d5\") pod \"manila-db-sync-k4xmx\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:20 crc kubenswrapper[5133]: I1121 14:44:20.042641 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-k4xmx" Nov 21 14:44:23 crc kubenswrapper[5133]: I1121 14:44:23.311013 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:44:23 crc kubenswrapper[5133]: I1121 14:44:23.312196 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:44:27 crc kubenswrapper[5133]: I1121 14:44:27.047535 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-k4xmx"] Nov 21 14:44:27 crc kubenswrapper[5133]: W1121 14:44:27.060141 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4126fdfa_1d4a_4276_951e_c1a26a3492dd.slice/crio-83c13b3c99d2005fdb839ed270176b0677b505e8da404bc7c6a87179c006a84c WatchSource:0}: Error finding container 83c13b3c99d2005fdb839ed270176b0677b505e8da404bc7c6a87179c006a84c: Status 404 returned error can't find the container with id 83c13b3c99d2005fdb839ed270176b0677b505e8da404bc7c6a87179c006a84c Nov 21 14:44:27 crc kubenswrapper[5133]: I1121 14:44:27.064263 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:44:27 crc kubenswrapper[5133]: I1121 14:44:27.253356 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-k4xmx" 
event={"ID":"4126fdfa-1d4a-4276-951e-c1a26a3492dd","Type":"ContainerStarted","Data":"83c13b3c99d2005fdb839ed270176b0677b505e8da404bc7c6a87179c006a84c"} Nov 21 14:44:27 crc kubenswrapper[5133]: I1121 14:44:27.256287 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85888d8669-n7vg6" event={"ID":"120ff136-ecbc-4f53-9338-d1435003710a","Type":"ContainerStarted","Data":"21ad40de45d9cde3e9d845fb48b069aaf4dfaa28c5880c10e378a91c37c88411"} Nov 21 14:44:27 crc kubenswrapper[5133]: I1121 14:44:27.258487 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c44865549-jt2j7" event={"ID":"a343caf3-f248-4c28-8131-d074688735ad","Type":"ContainerStarted","Data":"33362cc9d398695cf2fb634b04ad82951eaaf19a689fe77f97297db9faf80cc6"} Nov 21 14:44:27 crc kubenswrapper[5133]: I1121 14:44:27.260088 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fc96bd488-z7zf6" event={"ID":"63a7b11c-9f04-4a20-a705-a8ae4fe98285","Type":"ContainerStarted","Data":"8a673cfc580e0cbfdc91c9fd6e409f0dd4ef3e564de6c8bd155f353690ce5481"} Nov 21 14:44:27 crc kubenswrapper[5133]: I1121 14:44:27.262093 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6494f4b7cd-7bgbs" event={"ID":"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa","Type":"ContainerStarted","Data":"138e4b7431cd22840f60d7c51b66196bf2148c2bc3dce7794bdf09247f6ef235"} Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.273012 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c44865549-jt2j7" event={"ID":"a343caf3-f248-4c28-8131-d074688735ad","Type":"ContainerStarted","Data":"87f0653444432a51eef4f6b8be05fb9e4d940372d7368273b305f6be5555c9fd"} Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.273045 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-c44865549-jt2j7" podUID="a343caf3-f248-4c28-8131-d074688735ad" containerName="horizon-log" containerID="cri-o://33362cc9d398695cf2fb634b04ad82951eaaf19a689fe77f97297db9faf80cc6" gracePeriod=30 Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.273257 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-c44865549-jt2j7" podUID="a343caf3-f248-4c28-8131-d074688735ad" containerName="horizon" containerID="cri-o://87f0653444432a51eef4f6b8be05fb9e4d940372d7368273b305f6be5555c9fd" gracePeriod=30 Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.279566 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"73ddb6a4-cb5d-4c55-925b-3a2047cc35d1","Type":"ContainerStarted","Data":"dd727368de6ce1a2fb5b7469406e6cdea4496a49d897dd414629cca6314d64d6"} Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.286693 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fc96bd488-z7zf6" event={"ID":"63a7b11c-9f04-4a20-a705-a8ae4fe98285","Type":"ContainerStarted","Data":"5b43aa4d56390f27cc697f1ee7c55bbfbb2ac957fc240386054df7ec367e1031"} Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.291311 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6494f4b7cd-7bgbs" event={"ID":"ce180175-f40a-48ea-bf15-1c8bfa8ff9aa","Type":"ContainerStarted","Data":"f290ab7ef779c07c5f3e045964d3e43e2ec377ebaec6c216577aa579a1edf0e7"} Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.294435 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85888d8669-n7vg6" 
event={"ID":"120ff136-ecbc-4f53-9338-d1435003710a","Type":"ContainerStarted","Data":"3de5a2fe389c44ad3c9c0eabe1945327d01d925031be3f61e25852bc7597867b"} Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.294577 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85888d8669-n7vg6" podUID="120ff136-ecbc-4f53-9338-d1435003710a" containerName="horizon-log" containerID="cri-o://21ad40de45d9cde3e9d845fb48b069aaf4dfaa28c5880c10e378a91c37c88411" gracePeriod=30 Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.294595 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85888d8669-n7vg6" podUID="120ff136-ecbc-4f53-9338-d1435003710a" containerName="horizon" containerID="cri-o://3de5a2fe389c44ad3c9c0eabe1945327d01d925031be3f61e25852bc7597867b" gracePeriod=30 Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.299836 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-c44865549-jt2j7" podStartSLOduration=3.6524071830000002 podStartE2EDuration="19.29981025s" podCreationTimestamp="2025-11-21 14:44:09 +0000 UTC" firstStartedPulling="2025-11-21 14:44:11.096220176 +0000 UTC m=+3710.894052414" lastFinishedPulling="2025-11-21 14:44:26.743623233 +0000 UTC m=+3726.541455481" observedRunningTime="2025-11-21 14:44:28.29488705 +0000 UTC m=+3728.092719298" watchObservedRunningTime="2025-11-21 14:44:28.29981025 +0000 UTC m=+3728.097642508" Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.300850 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc682d32-5242-41ad-8040-1b88a4ed2534","Type":"ContainerStarted","Data":"830e4529d22874b6000d4cb53c45d4fa815a6b4a047a38788c5101c5a5541989"} Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.300888 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc682d32-5242-41ad-8040-1b88a4ed2534","Type":"ContainerStarted","Data":"1ca7ddbe5dc913638c0cbf969b53e33189ca63623a7b199b7a8492555bd96bdb"} Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.319845 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6494f4b7cd-7bgbs" podStartSLOduration=3.008163653 podStartE2EDuration="16.319824807s" podCreationTimestamp="2025-11-21 14:44:12 +0000 UTC" firstStartedPulling="2025-11-21 14:44:13.408188953 +0000 UTC m=+3713.206021201" lastFinishedPulling="2025-11-21 14:44:26.719850107 +0000 UTC m=+3726.517682355" observedRunningTime="2025-11-21 14:44:28.314578509 +0000 UTC m=+3728.112410757" watchObservedRunningTime="2025-11-21 14:44:28.319824807 +0000 UTC m=+3728.117657055" Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.333931 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-fc96bd488-z7zf6" podStartSLOduration=2.9146798 podStartE2EDuration="16.333913788s" podCreationTimestamp="2025-11-21 14:44:12 +0000 UTC" firstStartedPulling="2025-11-21 14:44:13.301267856 +0000 UTC m=+3713.099100094" lastFinishedPulling="2025-11-21 14:44:26.720501824 +0000 UTC m=+3726.518334082" observedRunningTime="2025-11-21 14:44:28.332920722 +0000 UTC m=+3728.130752970" watchObservedRunningTime="2025-11-21 14:44:28.333913788 +0000 UTC m=+3728.131746036" Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.359915 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-85888d8669-n7vg6" 
podStartSLOduration=3.763448609 podStartE2EDuration="19.359899773s" podCreationTimestamp="2025-11-21 14:44:09 +0000 UTC" firstStartedPulling="2025-11-21 14:44:11.122746685 +0000 UTC m=+3710.920578933" lastFinishedPulling="2025-11-21 14:44:26.719197849 +0000 UTC m=+3726.517030097" observedRunningTime="2025-11-21 14:44:28.351521302 +0000 UTC m=+3728.149353550" watchObservedRunningTime="2025-11-21 14:44:28.359899773 +0000 UTC m=+3728.157732021" Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.410759 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=12.410737052 podStartE2EDuration="12.410737052s" podCreationTimestamp="2025-11-21 14:44:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:28.403779399 +0000 UTC m=+3728.201611667" watchObservedRunningTime="2025-11-21 14:44:28.410737052 +0000 UTC m=+3728.208569300" Nov 21 14:44:28 crc kubenswrapper[5133]: I1121 14:44:28.416608 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=12.416590346 podStartE2EDuration="12.416590346s" podCreationTimestamp="2025-11-21 14:44:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:44:28.380320461 +0000 UTC m=+3728.178152709" watchObservedRunningTime="2025-11-21 14:44:28.416590346 +0000 UTC m=+3728.214422594" Nov 21 14:44:29 crc kubenswrapper[5133]: I1121 14:44:29.930520 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:44:30 crc kubenswrapper[5133]: I1121 14:44:30.011543 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:44:32 crc kubenswrapper[5133]: I1121 14:44:32.495473 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:32 crc kubenswrapper[5133]: I1121 14:44:32.495835 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:32 crc kubenswrapper[5133]: I1121 14:44:32.669102 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:32 crc kubenswrapper[5133]: I1121 14:44:32.669459 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:36 crc kubenswrapper[5133]: I1121 14:44:36.578147 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:36 crc kubenswrapper[5133]: I1121 14:44:36.578636 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:36 crc kubenswrapper[5133]: I1121 14:44:36.618443 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 14:44:36 crc kubenswrapper[5133]: I1121 14:44:36.620913 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 21 14:44:36 crc kubenswrapper[5133]: I1121 14:44:36.657428 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/glance-default-internal-api-0" Nov 21 14:44:36 crc kubenswrapper[5133]: I1121 14:44:36.662917 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:36 crc kubenswrapper[5133]: I1121 14:44:36.667378 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 14:44:36 crc kubenswrapper[5133]: I1121 14:44:36.675823 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 21 14:44:37 crc kubenswrapper[5133]: I1121 14:44:37.383873 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 14:44:37 crc kubenswrapper[5133]: I1121 14:44:37.383927 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:37 crc kubenswrapper[5133]: I1121 14:44:37.383942 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 21 14:44:37 crc kubenswrapper[5133]: I1121 14:44:37.383955 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:40 crc kubenswrapper[5133]: E1121 14:44:40.759294 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-manila-api:current-podified" Nov 21 14:44:40 crc kubenswrapper[5133]: E1121 14:44:40.760165 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manila-db-sync,Image:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,Command:[/bin/bash],Args:[-c sleep 0 && /usr/bin/manila-manage --config-dir /etc/manila/manila.conf.d db 
sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:job-config-data,ReadOnly:true,MountPath:/etc/manila/manila.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ws7d5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42429,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42429,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-db-sync-k4xmx_openstack(4126fdfa-1d4a-4276-951e-c1a26a3492dd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:44:40 crc kubenswrapper[5133]: E1121 14:44:40.761412 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/manila-db-sync-k4xmx" podUID="4126fdfa-1d4a-4276-951e-c1a26a3492dd" Nov 21 14:44:41 crc kubenswrapper[5133]: E1121 14:44:41.423380 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manila-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-manila-api:current-podified\\\"\"" pod="openstack/manila-db-sync-k4xmx" podUID="4126fdfa-1d4a-4276-951e-c1a26a3492dd" Nov 21 14:44:42 crc kubenswrapper[5133]: I1121 14:44:42.497645 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-fc96bd488-z7zf6" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.242:8443: connect: connection refused" Nov 21 14:44:42 crc kubenswrapper[5133]: I1121 14:44:42.671971 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-6494f4b7cd-7bgbs" podUID="ce180175-f40a-48ea-bf15-1c8bfa8ff9aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.243:8443: connect: connection refused" Nov 21 14:44:50 crc kubenswrapper[5133]: I1121 14:44:50.874891 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/glance-default-external-api-0" Nov 21 14:44:50 crc kubenswrapper[5133]: I1121 14:44:50.875743 5133 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 14:44:50 crc kubenswrapper[5133]: I1121 14:44:50.900906 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 21 14:44:50 crc kubenswrapper[5133]: I1121 14:44:50.902209 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:50 crc kubenswrapper[5133]: I1121 14:44:50.902345 5133 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 21 14:44:50 crc kubenswrapper[5133]: I1121 14:44:50.904805 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 21 14:44:53 crc kubenswrapper[5133]: I1121 14:44:53.310935 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:44:53 crc kubenswrapper[5133]: I1121 14:44:53.311323 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:44:53 crc kubenswrapper[5133]: I1121 14:44:53.311370 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:44:53 crc kubenswrapper[5133]: I1121 14:44:53.312294 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:44:53 crc kubenswrapper[5133]: I1121 14:44:53.312366 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" gracePeriod=600 Nov 21 14:44:53 crc kubenswrapper[5133]: E1121 14:44:53.884330 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:44:54 crc kubenswrapper[5133]: I1121 14:44:54.556260 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" exitCode=0 Nov 21 14:44:54 crc kubenswrapper[5133]: I1121 14:44:54.556331 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459"} Nov 21 14:44:54 crc kubenswrapper[5133]: I1121 14:44:54.556605 5133 scope.go:117] "RemoveContainer" containerID="142b6de6b55e03cb42254cbecc2c0c858a38432be696d3f2b827d387bb6c86eb" Nov 21 14:44:54 crc kubenswrapper[5133]: I1121 14:44:54.558634 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:44:54 crc kubenswrapper[5133]: I1121 14:44:54.558867 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-k4xmx" event={"ID":"4126fdfa-1d4a-4276-951e-c1a26a3492dd","Type":"ContainerStarted","Data":"bdd71fb7ddc6251ba22a8e7c91f642a797a40bb264e124d59c897d45aff00ef5"} Nov 21 14:44:54 crc kubenswrapper[5133]: E1121 14:44:54.558991 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:44:54 crc kubenswrapper[5133]: I1121 14:44:54.630386 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-k4xmx" podStartSLOduration=8.777094279 podStartE2EDuration="35.630363465s" podCreationTimestamp="2025-11-21 14:44:19 +0000 UTC" firstStartedPulling="2025-11-21 14:44:27.064073315 +0000 UTC m=+3726.861905563" lastFinishedPulling="2025-11-21 14:44:53.917342491 +0000 UTC m=+3753.715174749" observedRunningTime="2025-11-21 14:44:54.609085865 +0000 UTC m=+3754.406918133" watchObservedRunningTime="2025-11-21 14:44:54.630363465 +0000 UTC m=+3754.428195713" Nov 21 14:44:55 crc kubenswrapper[5133]: I1121 14:44:55.918187 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:44:56 crc kubenswrapper[5133]: I1121 14:44:56.035294 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:44:59 crc kubenswrapper[5133]: I1121 14:44:59.621450 5133 generic.go:334] "Generic (PLEG): container finished" podID="120ff136-ecbc-4f53-9338-d1435003710a" containerID="3de5a2fe389c44ad3c9c0eabe1945327d01d925031be3f61e25852bc7597867b" exitCode=137 Nov 21 14:44:59 crc kubenswrapper[5133]: I1121 14:44:59.622107 5133 generic.go:334] "Generic (PLEG): container finished" podID="120ff136-ecbc-4f53-9338-d1435003710a" containerID="21ad40de45d9cde3e9d845fb48b069aaf4dfaa28c5880c10e378a91c37c88411" exitCode=137 Nov 21 14:44:59 crc kubenswrapper[5133]: I1121 14:44:59.621515 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85888d8669-n7vg6" event={"ID":"120ff136-ecbc-4f53-9338-d1435003710a","Type":"ContainerDied","Data":"3de5a2fe389c44ad3c9c0eabe1945327d01d925031be3f61e25852bc7597867b"} Nov 21 14:44:59 crc kubenswrapper[5133]: I1121 14:44:59.622208 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85888d8669-n7vg6" event={"ID":"120ff136-ecbc-4f53-9338-d1435003710a","Type":"ContainerDied","Data":"21ad40de45d9cde3e9d845fb48b069aaf4dfaa28c5880c10e378a91c37c88411"} Nov 21 14:44:59 crc kubenswrapper[5133]: I1121 14:44:59.627366 5133 
generic.go:334] "Generic (PLEG): container finished" podID="a343caf3-f248-4c28-8131-d074688735ad" containerID="87f0653444432a51eef4f6b8be05fb9e4d940372d7368273b305f6be5555c9fd" exitCode=137 Nov 21 14:44:59 crc kubenswrapper[5133]: I1121 14:44:59.627404 5133 generic.go:334] "Generic (PLEG): container finished" podID="a343caf3-f248-4c28-8131-d074688735ad" containerID="33362cc9d398695cf2fb634b04ad82951eaaf19a689fe77f97297db9faf80cc6" exitCode=137 Nov 21 14:44:59 crc kubenswrapper[5133]: I1121 14:44:59.627429 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c44865549-jt2j7" event={"ID":"a343caf3-f248-4c28-8131-d074688735ad","Type":"ContainerDied","Data":"87f0653444432a51eef4f6b8be05fb9e4d940372d7368273b305f6be5555c9fd"} Nov 21 14:44:59 crc kubenswrapper[5133]: I1121 14:44:59.627460 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c44865549-jt2j7" event={"ID":"a343caf3-f248-4c28-8131-d074688735ad","Type":"ContainerDied","Data":"33362cc9d398695cf2fb634b04ad82951eaaf19a689fe77f97297db9faf80cc6"} Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.168202 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk"] Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.169859 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.172215 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.172489 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.186157 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk"] Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.255301 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.263801 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.315550 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e73d3a2d-e8eb-422d-84f8-e938f8594392-config-volume\") pod \"collect-profiles-29395605-vl8bk\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.315791 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kt4nj\" (UniqueName: \"kubernetes.io/projected/e73d3a2d-e8eb-422d-84f8-e938f8594392-kube-api-access-kt4nj\") pod \"collect-profiles-29395605-vl8bk\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.315900 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e73d3a2d-e8eb-422d-84f8-e938f8594392-secret-volume\") pod \"collect-profiles-29395605-vl8bk\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.416917 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6chgb\" (UniqueName: \"kubernetes.io/projected/120ff136-ecbc-4f53-9338-d1435003710a-kube-api-access-6chgb\") pod \"120ff136-ecbc-4f53-9338-d1435003710a\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.416961 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxllz\" (UniqueName: \"kubernetes.io/projected/a343caf3-f248-4c28-8131-d074688735ad-kube-api-access-qxllz\") pod \"a343caf3-f248-4c28-8131-d074688735ad\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417051 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-config-data\") pod \"a343caf3-f248-4c28-8131-d074688735ad\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417121 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/120ff136-ecbc-4f53-9338-d1435003710a-horizon-secret-key\") pod \"120ff136-ecbc-4f53-9338-d1435003710a\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417148 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a343caf3-f248-4c28-8131-d074688735ad-horizon-secret-key\") pod \"a343caf3-f248-4c28-8131-d074688735ad\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417196 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-scripts\") pod \"120ff136-ecbc-4f53-9338-d1435003710a\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " Nov 21 14:45:00 
crc kubenswrapper[5133]: I1121 14:45:00.417231 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/120ff136-ecbc-4f53-9338-d1435003710a-logs\") pod \"120ff136-ecbc-4f53-9338-d1435003710a\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417250 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a343caf3-f248-4c28-8131-d074688735ad-logs\") pod \"a343caf3-f248-4c28-8131-d074688735ad\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417270 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-scripts\") pod \"a343caf3-f248-4c28-8131-d074688735ad\" (UID: \"a343caf3-f248-4c28-8131-d074688735ad\") " Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417345 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-config-data\") pod \"120ff136-ecbc-4f53-9338-d1435003710a\" (UID: \"120ff136-ecbc-4f53-9338-d1435003710a\") " Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417646 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e73d3a2d-e8eb-422d-84f8-e938f8594392-secret-volume\") pod \"collect-profiles-29395605-vl8bk\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417726 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e73d3a2d-e8eb-422d-84f8-e938f8594392-config-volume\") pod \"collect-profiles-29395605-vl8bk\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.417783 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kt4nj\" (UniqueName: \"kubernetes.io/projected/e73d3a2d-e8eb-422d-84f8-e938f8594392-kube-api-access-kt4nj\") pod \"collect-profiles-29395605-vl8bk\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.418916 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/120ff136-ecbc-4f53-9338-d1435003710a-logs" (OuterVolumeSpecName: "logs") pod "120ff136-ecbc-4f53-9338-d1435003710a" (UID: "120ff136-ecbc-4f53-9338-d1435003710a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.419073 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a343caf3-f248-4c28-8131-d074688735ad-logs" (OuterVolumeSpecName: "logs") pod "a343caf3-f248-4c28-8131-d074688735ad" (UID: "a343caf3-f248-4c28-8131-d074688735ad"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.419394 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e73d3a2d-e8eb-422d-84f8-e938f8594392-config-volume\") pod \"collect-profiles-29395605-vl8bk\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.424222 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/120ff136-ecbc-4f53-9338-d1435003710a-kube-api-access-6chgb" (OuterVolumeSpecName: "kube-api-access-6chgb") pod "120ff136-ecbc-4f53-9338-d1435003710a" (UID: "120ff136-ecbc-4f53-9338-d1435003710a"). InnerVolumeSpecName "kube-api-access-6chgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.431223 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/120ff136-ecbc-4f53-9338-d1435003710a-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "120ff136-ecbc-4f53-9338-d1435003710a" (UID: "120ff136-ecbc-4f53-9338-d1435003710a"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.437990 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a343caf3-f248-4c28-8131-d074688735ad-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "a343caf3-f248-4c28-8131-d074688735ad" (UID: "a343caf3-f248-4c28-8131-d074688735ad"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.438814 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a343caf3-f248-4c28-8131-d074688735ad-kube-api-access-qxllz" (OuterVolumeSpecName: "kube-api-access-qxllz") pod "a343caf3-f248-4c28-8131-d074688735ad" (UID: "a343caf3-f248-4c28-8131-d074688735ad"). InnerVolumeSpecName "kube-api-access-qxllz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.439703 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e73d3a2d-e8eb-422d-84f8-e938f8594392-secret-volume\") pod \"collect-profiles-29395605-vl8bk\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.441289 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kt4nj\" (UniqueName: \"kubernetes.io/projected/e73d3a2d-e8eb-422d-84f8-e938f8594392-kube-api-access-kt4nj\") pod \"collect-profiles-29395605-vl8bk\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.450721 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-config-data" (OuterVolumeSpecName: "config-data") pod "120ff136-ecbc-4f53-9338-d1435003710a" (UID: "120ff136-ecbc-4f53-9338-d1435003710a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.454663 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-scripts" (OuterVolumeSpecName: "scripts") pod "120ff136-ecbc-4f53-9338-d1435003710a" (UID: "120ff136-ecbc-4f53-9338-d1435003710a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.454737 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-scripts" (OuterVolumeSpecName: "scripts") pod "a343caf3-f248-4c28-8131-d074688735ad" (UID: "a343caf3-f248-4c28-8131-d074688735ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.466248 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-config-data" (OuterVolumeSpecName: "config-data") pod "a343caf3-f248-4c28-8131-d074688735ad" (UID: "a343caf3-f248-4c28-8131-d074688735ad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519592 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519626 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6chgb\" (UniqueName: \"kubernetes.io/projected/120ff136-ecbc-4f53-9338-d1435003710a-kube-api-access-6chgb\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519640 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxllz\" (UniqueName: \"kubernetes.io/projected/a343caf3-f248-4c28-8131-d074688735ad-kube-api-access-qxllz\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519652 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519663 5133 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/120ff136-ecbc-4f53-9338-d1435003710a-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519673 5133 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a343caf3-f248-4c28-8131-d074688735ad-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519717 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/120ff136-ecbc-4f53-9338-d1435003710a-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519741 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/120ff136-ecbc-4f53-9338-d1435003710a-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519761 5133 
reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a343caf3-f248-4c28-8131-d074688735ad-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.519817 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a343caf3-f248-4c28-8131-d074688735ad-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.584778 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.652330 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c44865549-jt2j7" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.652716 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c44865549-jt2j7" event={"ID":"a343caf3-f248-4c28-8131-d074688735ad","Type":"ContainerDied","Data":"24236545fa5456b80352cf6026cf34f0782baf823029a6c8078f92f5296bedf0"} Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.653435 5133 scope.go:117] "RemoveContainer" containerID="87f0653444432a51eef4f6b8be05fb9e4d940372d7368273b305f6be5555c9fd" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.657224 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85888d8669-n7vg6" event={"ID":"120ff136-ecbc-4f53-9338-d1435003710a","Type":"ContainerDied","Data":"d0714f83e8b9cd2c56b79360582a73e54d154a266477b19465491378707b14b6"} Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.657410 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85888d8669-n7vg6" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.699705 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c44865549-jt2j7"] Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.712780 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-c44865549-jt2j7"] Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.720594 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85888d8669-n7vg6"] Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.728103 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-85888d8669-n7vg6"] Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.856496 5133 scope.go:117] "RemoveContainer" containerID="33362cc9d398695cf2fb634b04ad82951eaaf19a689fe77f97297db9faf80cc6" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.894776 5133 scope.go:117] "RemoveContainer" containerID="3de5a2fe389c44ad3c9c0eabe1945327d01d925031be3f61e25852bc7597867b" Nov 21 14:45:00 crc kubenswrapper[5133]: I1121 14:45:00.923174 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-fc96bd488-z7zf6" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 21 14:45:01 crc kubenswrapper[5133]: I1121 14:45:01.040262 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-6494f4b7cd-7bgbs" podUID="ce180175-f40a-48ea-bf15-1c8bfa8ff9aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.243:8443/dashboard/auth/login/?next=/dashboard/\": 
context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 14:45:01 crc kubenswrapper[5133]: I1121 14:45:01.043077 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk"] Nov 21 14:45:01 crc kubenswrapper[5133]: I1121 14:45:01.092423 5133 scope.go:117] "RemoveContainer" containerID="21ad40de45d9cde3e9d845fb48b069aaf4dfaa28c5880c10e378a91c37c88411" Nov 21 14:45:01 crc kubenswrapper[5133]: W1121 14:45:01.098776 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode73d3a2d_e8eb_422d_84f8_e938f8594392.slice/crio-c2a8ebd942c8ef6650d13f57e586469ce1aa6a385d659138dd19ff1a50f114d3 WatchSource:0}: Error finding container c2a8ebd942c8ef6650d13f57e586469ce1aa6a385d659138dd19ff1a50f114d3: Status 404 returned error can't find the container with id c2a8ebd942c8ef6650d13f57e586469ce1aa6a385d659138dd19ff1a50f114d3 Nov 21 14:45:01 crc kubenswrapper[5133]: I1121 14:45:01.668774 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" event={"ID":"e73d3a2d-e8eb-422d-84f8-e938f8594392","Type":"ContainerStarted","Data":"c2a8ebd942c8ef6650d13f57e586469ce1aa6a385d659138dd19ff1a50f114d3"} Nov 21 14:45:02 crc kubenswrapper[5133]: I1121 14:45:02.469016 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="120ff136-ecbc-4f53-9338-d1435003710a" path="/var/lib/kubelet/pods/120ff136-ecbc-4f53-9338-d1435003710a/volumes" Nov 21 14:45:02 crc kubenswrapper[5133]: I1121 14:45:02.469683 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a343caf3-f248-4c28-8131-d074688735ad" path="/var/lib/kubelet/pods/a343caf3-f248-4c28-8131-d074688735ad/volumes" Nov 21 14:45:03 crc kubenswrapper[5133]: I1121 14:45:03.692091 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" event={"ID":"e73d3a2d-e8eb-422d-84f8-e938f8594392","Type":"ContainerStarted","Data":"cf6eb7d4068c3ae9a3d124af01410353e736b42fb84428f9ce10bcbe4f152c6f"} Nov 21 14:45:04 crc kubenswrapper[5133]: I1121 14:45:04.439602 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-6494f4b7cd-7bgbs" Nov 21 14:45:04 crc kubenswrapper[5133]: I1121 14:45:04.513418 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-fc96bd488-z7zf6"] Nov 21 14:45:04 crc kubenswrapper[5133]: I1121 14:45:04.513797 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-fc96bd488-z7zf6" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon-log" containerID="cri-o://8a673cfc580e0cbfdc91c9fd6e409f0dd4ef3e564de6c8bd155f353690ce5481" gracePeriod=30 Nov 21 14:45:04 crc kubenswrapper[5133]: I1121 14:45:04.514343 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-fc96bd488-z7zf6" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" containerID="cri-o://5b43aa4d56390f27cc697f1ee7c55bbfbb2ac957fc240386054df7ec367e1031" gracePeriod=30 Nov 21 14:45:04 crc kubenswrapper[5133]: I1121 14:45:04.532193 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-fc96bd488-z7zf6" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": 
EOF" Nov 21 14:45:04 crc kubenswrapper[5133]: I1121 14:45:04.703953 5133 generic.go:334] "Generic (PLEG): container finished" podID="e73d3a2d-e8eb-422d-84f8-e938f8594392" containerID="cf6eb7d4068c3ae9a3d124af01410353e736b42fb84428f9ce10bcbe4f152c6f" exitCode=0 Nov 21 14:45:04 crc kubenswrapper[5133]: I1121 14:45:04.704043 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" event={"ID":"e73d3a2d-e8eb-422d-84f8-e938f8594392","Type":"ContainerDied","Data":"cf6eb7d4068c3ae9a3d124af01410353e736b42fb84428f9ce10bcbe4f152c6f"} Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.385144 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.545227 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kt4nj\" (UniqueName: \"kubernetes.io/projected/e73d3a2d-e8eb-422d-84f8-e938f8594392-kube-api-access-kt4nj\") pod \"e73d3a2d-e8eb-422d-84f8-e938f8594392\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.545428 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e73d3a2d-e8eb-422d-84f8-e938f8594392-config-volume\") pod \"e73d3a2d-e8eb-422d-84f8-e938f8594392\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.545546 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e73d3a2d-e8eb-422d-84f8-e938f8594392-secret-volume\") pod \"e73d3a2d-e8eb-422d-84f8-e938f8594392\" (UID: \"e73d3a2d-e8eb-422d-84f8-e938f8594392\") " Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.546320 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e73d3a2d-e8eb-422d-84f8-e938f8594392-config-volume" (OuterVolumeSpecName: "config-volume") pod "e73d3a2d-e8eb-422d-84f8-e938f8594392" (UID: "e73d3a2d-e8eb-422d-84f8-e938f8594392"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.555314 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e73d3a2d-e8eb-422d-84f8-e938f8594392-kube-api-access-kt4nj" (OuterVolumeSpecName: "kube-api-access-kt4nj") pod "e73d3a2d-e8eb-422d-84f8-e938f8594392" (UID: "e73d3a2d-e8eb-422d-84f8-e938f8594392"). InnerVolumeSpecName "kube-api-access-kt4nj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.568168 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e73d3a2d-e8eb-422d-84f8-e938f8594392-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e73d3a2d-e8eb-422d-84f8-e938f8594392" (UID: "e73d3a2d-e8eb-422d-84f8-e938f8594392"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.648010 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e73d3a2d-e8eb-422d-84f8-e938f8594392-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.648044 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kt4nj\" (UniqueName: \"kubernetes.io/projected/e73d3a2d-e8eb-422d-84f8-e938f8594392-kube-api-access-kt4nj\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.648052 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e73d3a2d-e8eb-422d-84f8-e938f8594392-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.721334 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" event={"ID":"e73d3a2d-e8eb-422d-84f8-e938f8594392","Type":"ContainerDied","Data":"c2a8ebd942c8ef6650d13f57e586469ce1aa6a385d659138dd19ff1a50f114d3"} Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.721372 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2a8ebd942c8ef6650d13f57e586469ce1aa6a385d659138dd19ff1a50f114d3" Nov 21 14:45:06 crc kubenswrapper[5133]: I1121 14:45:06.721425 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk" Nov 21 14:45:07 crc kubenswrapper[5133]: I1121 14:45:07.542861 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p"] Nov 21 14:45:07 crc kubenswrapper[5133]: I1121 14:45:07.559502 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395560-hlz6p"] Nov 21 14:45:07 crc kubenswrapper[5133]: I1121 14:45:07.784755 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-fc96bd488-z7zf6" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:40592->10.217.0.242:8443: read: connection reset by peer" Nov 21 14:45:08 crc kubenswrapper[5133]: I1121 14:45:08.458374 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:45:08 crc kubenswrapper[5133]: E1121 14:45:08.458768 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:45:08 crc kubenswrapper[5133]: I1121 14:45:08.468812 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac012817-c59e-4d3b-8d42-4cb7795b1034" path="/var/lib/kubelet/pods/ac012817-c59e-4d3b-8d42-4cb7795b1034/volumes" Nov 21 14:45:08 crc kubenswrapper[5133]: I1121 14:45:08.745836 5133 generic.go:334] "Generic (PLEG): container finished" podID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" 
containerID="5b43aa4d56390f27cc697f1ee7c55bbfbb2ac957fc240386054df7ec367e1031" exitCode=0 Nov 21 14:45:08 crc kubenswrapper[5133]: I1121 14:45:08.745911 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fc96bd488-z7zf6" event={"ID":"63a7b11c-9f04-4a20-a705-a8ae4fe98285","Type":"ContainerDied","Data":"5b43aa4d56390f27cc697f1ee7c55bbfbb2ac957fc240386054df7ec367e1031"} Nov 21 14:45:12 crc kubenswrapper[5133]: I1121 14:45:12.495865 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-fc96bd488-z7zf6" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.242:8443: connect: connection refused" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.692590 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ljl8s"] Nov 21 14:45:13 crc kubenswrapper[5133]: E1121 14:45:13.692957 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="120ff136-ecbc-4f53-9338-d1435003710a" containerName="horizon" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.692969 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="120ff136-ecbc-4f53-9338-d1435003710a" containerName="horizon" Nov 21 14:45:13 crc kubenswrapper[5133]: E1121 14:45:13.692986 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a343caf3-f248-4c28-8131-d074688735ad" containerName="horizon-log" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.692994 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a343caf3-f248-4c28-8131-d074688735ad" containerName="horizon-log" Nov 21 14:45:13 crc kubenswrapper[5133]: E1121 14:45:13.693019 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e73d3a2d-e8eb-422d-84f8-e938f8594392" containerName="collect-profiles" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.693026 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="e73d3a2d-e8eb-422d-84f8-e938f8594392" containerName="collect-profiles" Nov 21 14:45:13 crc kubenswrapper[5133]: E1121 14:45:13.693047 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="120ff136-ecbc-4f53-9338-d1435003710a" containerName="horizon-log" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.693052 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="120ff136-ecbc-4f53-9338-d1435003710a" containerName="horizon-log" Nov 21 14:45:13 crc kubenswrapper[5133]: E1121 14:45:13.693065 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a343caf3-f248-4c28-8131-d074688735ad" containerName="horizon" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.693070 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a343caf3-f248-4c28-8131-d074688735ad" containerName="horizon" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.693278 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a343caf3-f248-4c28-8131-d074688735ad" containerName="horizon" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.693297 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="120ff136-ecbc-4f53-9338-d1435003710a" containerName="horizon-log" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.693307 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a343caf3-f248-4c28-8131-d074688735ad" containerName="horizon-log" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.693328 5133 
memory_manager.go:354] "RemoveStaleState removing state" podUID="120ff136-ecbc-4f53-9338-d1435003710a" containerName="horizon" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.693346 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="e73d3a2d-e8eb-422d-84f8-e938f8594392" containerName="collect-profiles" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.695036 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.707735 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ljl8s"] Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.781730 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-utilities\") pod \"redhat-operators-ljl8s\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.782090 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49qgq\" (UniqueName: \"kubernetes.io/projected/a5b43c07-5847-4a7d-8c82-8079c3baf031-kube-api-access-49qgq\") pod \"redhat-operators-ljl8s\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.782390 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-catalog-content\") pod \"redhat-operators-ljl8s\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.885132 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-utilities\") pod \"redhat-operators-ljl8s\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.885428 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49qgq\" (UniqueName: \"kubernetes.io/projected/a5b43c07-5847-4a7d-8c82-8079c3baf031-kube-api-access-49qgq\") pod \"redhat-operators-ljl8s\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.885577 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-catalog-content\") pod \"redhat-operators-ljl8s\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.885693 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-utilities\") pod \"redhat-operators-ljl8s\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.885910 5133 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-catalog-content\") pod \"redhat-operators-ljl8s\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:13 crc kubenswrapper[5133]: I1121 14:45:13.904847 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49qgq\" (UniqueName: \"kubernetes.io/projected/a5b43c07-5847-4a7d-8c82-8079c3baf031-kube-api-access-49qgq\") pod \"redhat-operators-ljl8s\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:14 crc kubenswrapper[5133]: I1121 14:45:14.016371 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:14 crc kubenswrapper[5133]: I1121 14:45:14.519657 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ljl8s"] Nov 21 14:45:14 crc kubenswrapper[5133]: I1121 14:45:14.813308 5133 generic.go:334] "Generic (PLEG): container finished" podID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerID="2d190d47cf1209c159e46f148f24fc2a5544899151715676269417110d752a8b" exitCode=0 Nov 21 14:45:14 crc kubenswrapper[5133]: I1121 14:45:14.813358 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl8s" event={"ID":"a5b43c07-5847-4a7d-8c82-8079c3baf031","Type":"ContainerDied","Data":"2d190d47cf1209c159e46f148f24fc2a5544899151715676269417110d752a8b"} Nov 21 14:45:14 crc kubenswrapper[5133]: I1121 14:45:14.813386 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl8s" event={"ID":"a5b43c07-5847-4a7d-8c82-8079c3baf031","Type":"ContainerStarted","Data":"42defe9d763e9e213eaa1eacf8b38bc6f66ee0938a765fec547372f6b160ba2a"} Nov 21 14:45:16 crc kubenswrapper[5133]: I1121 14:45:16.832261 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl8s" event={"ID":"a5b43c07-5847-4a7d-8c82-8079c3baf031","Type":"ContainerStarted","Data":"ae6cb65421446eef23b82846b98f3028e535f74b6c9e6a0234d7f824624f63a3"} Nov 21 14:45:17 crc kubenswrapper[5133]: I1121 14:45:17.852898 5133 generic.go:334] "Generic (PLEG): container finished" podID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerID="ae6cb65421446eef23b82846b98f3028e535f74b6c9e6a0234d7f824624f63a3" exitCode=0 Nov 21 14:45:17 crc kubenswrapper[5133]: I1121 14:45:17.853312 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl8s" event={"ID":"a5b43c07-5847-4a7d-8c82-8079c3baf031","Type":"ContainerDied","Data":"ae6cb65421446eef23b82846b98f3028e535f74b6c9e6a0234d7f824624f63a3"} Nov 21 14:45:21 crc kubenswrapper[5133]: I1121 14:45:21.888581 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl8s" event={"ID":"a5b43c07-5847-4a7d-8c82-8079c3baf031","Type":"ContainerStarted","Data":"30a3862082d1605dd4db4ac76f0672dad4484f06dd88149e0679adf7e96eb52c"} Nov 21 14:45:21 crc kubenswrapper[5133]: I1121 14:45:21.909670 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ljl8s" podStartSLOduration=2.360600385 podStartE2EDuration="8.909654034s" podCreationTimestamp="2025-11-21 14:45:13 +0000 UTC" firstStartedPulling="2025-11-21 14:45:14.81493939 +0000 UTC 
m=+3774.612771638" lastFinishedPulling="2025-11-21 14:45:21.363993039 +0000 UTC m=+3781.161825287" observedRunningTime="2025-11-21 14:45:21.906596533 +0000 UTC m=+3781.704428781" watchObservedRunningTime="2025-11-21 14:45:21.909654034 +0000 UTC m=+3781.707486282" Nov 21 14:45:22 crc kubenswrapper[5133]: I1121 14:45:22.495908 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-fc96bd488-z7zf6" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.242:8443: connect: connection refused" Nov 21 14:45:23 crc kubenswrapper[5133]: I1121 14:45:23.457659 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:45:23 crc kubenswrapper[5133]: E1121 14:45:23.458262 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:45:24 crc kubenswrapper[5133]: I1121 14:45:24.021203 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:24 crc kubenswrapper[5133]: I1121 14:45:24.021269 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:25 crc kubenswrapper[5133]: I1121 14:45:25.066340 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ljl8s" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerName="registry-server" probeResult="failure" output=< Nov 21 14:45:25 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 14:45:25 crc kubenswrapper[5133]: > Nov 21 14:45:32 crc kubenswrapper[5133]: I1121 14:45:32.495871 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-fc96bd488-z7zf6" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.242:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.242:8443: connect: connection refused" Nov 21 14:45:34 crc kubenswrapper[5133]: I1121 14:45:34.067064 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:34 crc kubenswrapper[5133]: I1121 14:45:34.112141 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:34 crc kubenswrapper[5133]: I1121 14:45:34.300518 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ljl8s"] Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.026195 5133 generic.go:334] "Generic (PLEG): container finished" podID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerID="8a673cfc580e0cbfdc91c9fd6e409f0dd4ef3e564de6c8bd155f353690ce5481" exitCode=137 Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.026270 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fc96bd488-z7zf6" 
event={"ID":"63a7b11c-9f04-4a20-a705-a8ae4fe98285","Type":"ContainerDied","Data":"8a673cfc580e0cbfdc91c9fd6e409f0dd4ef3e564de6c8bd155f353690ce5481"} Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.457697 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:45:35 crc kubenswrapper[5133]: E1121 14:45:35.458057 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.635824 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.691491 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-secret-key\") pod \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.691582 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x49ng\" (UniqueName: \"kubernetes.io/projected/63a7b11c-9f04-4a20-a705-a8ae4fe98285-kube-api-access-x49ng\") pod \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.691614 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-config-data\") pod \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.691653 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a7b11c-9f04-4a20-a705-a8ae4fe98285-logs\") pod \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.691767 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-scripts\") pod \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.691801 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-tls-certs\") pod \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.691818 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-combined-ca-bundle\") pod \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\" (UID: \"63a7b11c-9f04-4a20-a705-a8ae4fe98285\") " Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.692711 5133 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63a7b11c-9f04-4a20-a705-a8ae4fe98285-logs" (OuterVolumeSpecName: "logs") pod "63a7b11c-9f04-4a20-a705-a8ae4fe98285" (UID: "63a7b11c-9f04-4a20-a705-a8ae4fe98285"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.700610 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "63a7b11c-9f04-4a20-a705-a8ae4fe98285" (UID: "63a7b11c-9f04-4a20-a705-a8ae4fe98285"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.700624 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63a7b11c-9f04-4a20-a705-a8ae4fe98285-kube-api-access-x49ng" (OuterVolumeSpecName: "kube-api-access-x49ng") pod "63a7b11c-9f04-4a20-a705-a8ae4fe98285" (UID: "63a7b11c-9f04-4a20-a705-a8ae4fe98285"). InnerVolumeSpecName "kube-api-access-x49ng". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.718261 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-scripts" (OuterVolumeSpecName: "scripts") pod "63a7b11c-9f04-4a20-a705-a8ae4fe98285" (UID: "63a7b11c-9f04-4a20-a705-a8ae4fe98285"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.721166 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "63a7b11c-9f04-4a20-a705-a8ae4fe98285" (UID: "63a7b11c-9f04-4a20-a705-a8ae4fe98285"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.723705 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-config-data" (OuterVolumeSpecName: "config-data") pod "63a7b11c-9f04-4a20-a705-a8ae4fe98285" (UID: "63a7b11c-9f04-4a20-a705-a8ae4fe98285"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.744547 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "63a7b11c-9f04-4a20-a705-a8ae4fe98285" (UID: "63a7b11c-9f04-4a20-a705-a8ae4fe98285"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.793875 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63a7b11c-9f04-4a20-a705-a8ae4fe98285-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.793917 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.793929 5133 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.793940 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.793950 5133 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/63a7b11c-9f04-4a20-a705-a8ae4fe98285-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.793958 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x49ng\" (UniqueName: \"kubernetes.io/projected/63a7b11c-9f04-4a20-a705-a8ae4fe98285-kube-api-access-x49ng\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:35 crc kubenswrapper[5133]: I1121 14:45:35.793968 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/63a7b11c-9f04-4a20-a705-a8ae4fe98285-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:36 crc kubenswrapper[5133]: I1121 14:45:36.037874 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-fc96bd488-z7zf6" event={"ID":"63a7b11c-9f04-4a20-a705-a8ae4fe98285","Type":"ContainerDied","Data":"7b6ed096b04e4b5029c5bc6b9d0edb0469fc557944e4a48e090452e6233e2741"} Nov 21 14:45:36 crc kubenswrapper[5133]: I1121 14:45:36.037936 5133 scope.go:117] "RemoveContainer" containerID="5b43aa4d56390f27cc697f1ee7c55bbfbb2ac957fc240386054df7ec367e1031" Nov 21 14:45:36 crc kubenswrapper[5133]: I1121 14:45:36.037936 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-fc96bd488-z7zf6" Nov 21 14:45:36 crc kubenswrapper[5133]: I1121 14:45:36.038543 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ljl8s" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerName="registry-server" containerID="cri-o://30a3862082d1605dd4db4ac76f0672dad4484f06dd88149e0679adf7e96eb52c" gracePeriod=2 Nov 21 14:45:36 crc kubenswrapper[5133]: I1121 14:45:36.080530 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-fc96bd488-z7zf6"] Nov 21 14:45:36 crc kubenswrapper[5133]: I1121 14:45:36.088722 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-fc96bd488-z7zf6"] Nov 21 14:45:36 crc kubenswrapper[5133]: I1121 14:45:36.212620 5133 scope.go:117] "RemoveContainer" containerID="8a673cfc580e0cbfdc91c9fd6e409f0dd4ef3e564de6c8bd155f353690ce5481" Nov 21 14:45:36 crc kubenswrapper[5133]: I1121 14:45:36.474315 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" path="/var/lib/kubelet/pods/63a7b11c-9f04-4a20-a705-a8ae4fe98285/volumes" Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.049450 5133 generic.go:334] "Generic (PLEG): container finished" podID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerID="30a3862082d1605dd4db4ac76f0672dad4484f06dd88149e0679adf7e96eb52c" exitCode=0 Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.049534 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl8s" event={"ID":"a5b43c07-5847-4a7d-8c82-8079c3baf031","Type":"ContainerDied","Data":"30a3862082d1605dd4db4ac76f0672dad4484f06dd88149e0679adf7e96eb52c"} Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.724811 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.832657 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-utilities\") pod \"a5b43c07-5847-4a7d-8c82-8079c3baf031\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.832697 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49qgq\" (UniqueName: \"kubernetes.io/projected/a5b43c07-5847-4a7d-8c82-8079c3baf031-kube-api-access-49qgq\") pod \"a5b43c07-5847-4a7d-8c82-8079c3baf031\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.832769 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-catalog-content\") pod \"a5b43c07-5847-4a7d-8c82-8079c3baf031\" (UID: \"a5b43c07-5847-4a7d-8c82-8079c3baf031\") " Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.833495 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-utilities" (OuterVolumeSpecName: "utilities") pod "a5b43c07-5847-4a7d-8c82-8079c3baf031" (UID: "a5b43c07-5847-4a7d-8c82-8079c3baf031"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.840249 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5b43c07-5847-4a7d-8c82-8079c3baf031-kube-api-access-49qgq" (OuterVolumeSpecName: "kube-api-access-49qgq") pod "a5b43c07-5847-4a7d-8c82-8079c3baf031" (UID: "a5b43c07-5847-4a7d-8c82-8079c3baf031"). InnerVolumeSpecName "kube-api-access-49qgq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.919523 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a5b43c07-5847-4a7d-8c82-8079c3baf031" (UID: "a5b43c07-5847-4a7d-8c82-8079c3baf031"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.941638 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.941673 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49qgq\" (UniqueName: \"kubernetes.io/projected/a5b43c07-5847-4a7d-8c82-8079c3baf031-kube-api-access-49qgq\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:37 crc kubenswrapper[5133]: I1121 14:45:37.941690 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5b43c07-5847-4a7d-8c82-8079c3baf031-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:45:38 crc kubenswrapper[5133]: I1121 14:45:38.060916 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ljl8s" event={"ID":"a5b43c07-5847-4a7d-8c82-8079c3baf031","Type":"ContainerDied","Data":"42defe9d763e9e213eaa1eacf8b38bc6f66ee0938a765fec547372f6b160ba2a"} Nov 21 14:45:38 crc kubenswrapper[5133]: I1121 14:45:38.060980 5133 scope.go:117] "RemoveContainer" containerID="30a3862082d1605dd4db4ac76f0672dad4484f06dd88149e0679adf7e96eb52c" Nov 21 14:45:38 crc kubenswrapper[5133]: I1121 14:45:38.061015 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ljl8s" Nov 21 14:45:38 crc kubenswrapper[5133]: I1121 14:45:38.080987 5133 scope.go:117] "RemoveContainer" containerID="ae6cb65421446eef23b82846b98f3028e535f74b6c9e6a0234d7f824624f63a3" Nov 21 14:45:38 crc kubenswrapper[5133]: I1121 14:45:38.096423 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ljl8s"] Nov 21 14:45:38 crc kubenswrapper[5133]: I1121 14:45:38.104957 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ljl8s"] Nov 21 14:45:38 crc kubenswrapper[5133]: I1121 14:45:38.362564 5133 scope.go:117] "RemoveContainer" containerID="2d190d47cf1209c159e46f148f24fc2a5544899151715676269417110d752a8b" Nov 21 14:45:38 crc kubenswrapper[5133]: I1121 14:45:38.473321 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" path="/var/lib/kubelet/pods/a5b43c07-5847-4a7d-8c82-8079c3baf031/volumes" Nov 21 14:45:39 crc kubenswrapper[5133]: I1121 14:45:39.883661 5133 scope.go:117] "RemoveContainer" containerID="6f2fb749f68cbec474032f1b6521017cd777b9c458ecab821bce31f47ea71192" Nov 21 14:45:48 crc kubenswrapper[5133]: I1121 14:45:48.458578 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:45:48 crc kubenswrapper[5133]: E1121 14:45:48.459436 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:46:01 crc kubenswrapper[5133]: I1121 14:46:01.457578 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:46:01 crc kubenswrapper[5133]: E1121 14:46:01.458343 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:46:12 crc kubenswrapper[5133]: I1121 14:46:12.464208 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:46:12 crc kubenswrapper[5133]: E1121 14:46:12.465238 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:46:20 crc kubenswrapper[5133]: I1121 14:46:20.650881 5133 generic.go:334] "Generic (PLEG): container finished" podID="4126fdfa-1d4a-4276-951e-c1a26a3492dd" containerID="bdd71fb7ddc6251ba22a8e7c91f642a797a40bb264e124d59c897d45aff00ef5" exitCode=0 Nov 21 14:46:20 crc kubenswrapper[5133]: I1121 14:46:20.651184 5133 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-k4xmx" event={"ID":"4126fdfa-1d4a-4276-951e-c1a26a3492dd","Type":"ContainerDied","Data":"bdd71fb7ddc6251ba22a8e7c91f642a797a40bb264e124d59c897d45aff00ef5"} Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.129419 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-k4xmx" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.263375 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-combined-ca-bundle\") pod \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.263446 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-config-data\") pod \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.263487 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws7d5\" (UniqueName: \"kubernetes.io/projected/4126fdfa-1d4a-4276-951e-c1a26a3492dd-kube-api-access-ws7d5\") pod \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.263670 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-job-config-data\") pod \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\" (UID: \"4126fdfa-1d4a-4276-951e-c1a26a3492dd\") " Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.268990 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4126fdfa-1d4a-4276-951e-c1a26a3492dd-kube-api-access-ws7d5" (OuterVolumeSpecName: "kube-api-access-ws7d5") pod "4126fdfa-1d4a-4276-951e-c1a26a3492dd" (UID: "4126fdfa-1d4a-4276-951e-c1a26a3492dd"). InnerVolumeSpecName "kube-api-access-ws7d5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.269289 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "4126fdfa-1d4a-4276-951e-c1a26a3492dd" (UID: "4126fdfa-1d4a-4276-951e-c1a26a3492dd"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.271406 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-config-data" (OuterVolumeSpecName: "config-data") pod "4126fdfa-1d4a-4276-951e-c1a26a3492dd" (UID: "4126fdfa-1d4a-4276-951e-c1a26a3492dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.291467 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4126fdfa-1d4a-4276-951e-c1a26a3492dd" (UID: "4126fdfa-1d4a-4276-951e-c1a26a3492dd"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.374774 5133 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-job-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.374838 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.374851 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4126fdfa-1d4a-4276-951e-c1a26a3492dd-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.374864 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ws7d5\" (UniqueName: \"kubernetes.io/projected/4126fdfa-1d4a-4276-951e-c1a26a3492dd-kube-api-access-ws7d5\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.677233 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-k4xmx" event={"ID":"4126fdfa-1d4a-4276-951e-c1a26a3492dd","Type":"ContainerDied","Data":"83c13b3c99d2005fdb839ed270176b0677b505e8da404bc7c6a87179c006a84c"} Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.677277 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="83c13b3c99d2005fdb839ed270176b0677b505e8da404bc7c6a87179c006a84c" Nov 21 14:46:22 crc kubenswrapper[5133]: I1121 14:46:22.677398 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-sync-k4xmx" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.060162 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 21 14:46:23 crc kubenswrapper[5133]: E1121 14:46:23.060637 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerName="extract-utilities" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.060661 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerName="extract-utilities" Nov 21 14:46:23 crc kubenswrapper[5133]: E1121 14:46:23.060677 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4126fdfa-1d4a-4276-951e-c1a26a3492dd" containerName="manila-db-sync" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.060685 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="4126fdfa-1d4a-4276-951e-c1a26a3492dd" containerName="manila-db-sync" Nov 21 14:46:23 crc kubenswrapper[5133]: E1121 14:46:23.060699 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerName="registry-server" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.060707 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerName="registry-server" Nov 21 14:46:23 crc kubenswrapper[5133]: E1121 14:46:23.060733 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon-log" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.060740 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon-log" Nov 21 14:46:23 crc kubenswrapper[5133]: E1121 14:46:23.060756 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.060763 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" Nov 21 14:46:23 crc kubenswrapper[5133]: E1121 14:46:23.060786 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerName="extract-content" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.060793 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerName="extract-content" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.061036 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.061058 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5b43c07-5847-4a7d-8c82-8079c3baf031" containerName="registry-server" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.061080 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="63a7b11c-9f04-4a20-a705-a8ae4fe98285" containerName="horizon-log" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.061095 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="4126fdfa-1d4a-4276-951e-c1a26a3492dd" containerName="manila-db-sync" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.062306 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.069959 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.073119 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.073610 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-5zdnm" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.077233 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.080726 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.129449 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.131073 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.135397 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.140917 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.193844 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdg9t\" (UniqueName: \"kubernetes.io/projected/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-kube-api-access-fdg9t\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.193903 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-scripts\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.195581 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.195702 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.195892 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.196115 5133 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.196248 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-69655fd4bf-xltwv"] Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.198365 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.203786 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69655fd4bf-xltwv"] Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.297850 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.297922 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.297949 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.297990 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-ovsdbserver-sb\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298068 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ckr6s\" (UniqueName: \"kubernetes.io/projected/06549e5c-262d-45b3-9790-7c264bcecf3c-kube-api-access-ckr6s\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298117 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298145 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdg9t\" (UniqueName: \"kubernetes.io/projected/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-kube-api-access-fdg9t\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " 
pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298166 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-scripts\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298183 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-scripts\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298212 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2x2w\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-kube-api-access-z2x2w\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298242 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298270 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-config\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298291 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298318 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298346 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-openstack-edpm-ipam\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298375 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: 
I1121 14:46:23.298396 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-ceph\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298427 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-dns-svc\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298456 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.298505 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-ovsdbserver-nb\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.303454 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.303720 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.304123 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.312495 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-scripts\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.312714 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.327801 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdg9t\" (UniqueName: 
\"kubernetes.io/projected/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-kube-api-access-fdg9t\") pod \"manila-scheduler-0\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.381794 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.384532 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.388167 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.389565 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.399798 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-ovsdbserver-sb\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.399851 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ckr6s\" (UniqueName: \"kubernetes.io/projected/06549e5c-262d-45b3-9790-7c264bcecf3c-kube-api-access-ckr6s\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.399916 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-scripts\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.399957 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2x2w\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-kube-api-access-z2x2w\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.399989 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400043 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-config\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400086 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-openstack-edpm-ipam\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc 
kubenswrapper[5133]: I1121 14:46:23.400121 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400140 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-ceph\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400167 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-dns-svc\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400194 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400217 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-ovsdbserver-nb\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400262 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400284 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400576 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.400727 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-ovsdbserver-sb\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.401137 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-config\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.401137 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-openstack-edpm-ipam\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.401175 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.401501 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-ovsdbserver-nb\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.401625 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/06549e5c-262d-45b3-9790-7c264bcecf3c-dns-svc\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.403981 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.403988 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.405278 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.409112 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.414536 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-ceph\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.423179 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-scripts\") pod 
\"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.436820 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2x2w\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-kube-api-access-z2x2w\") pod \"manila-share-share1-0\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.458204 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ckr6s\" (UniqueName: \"kubernetes.io/projected/06549e5c-262d-45b3-9790-7c264bcecf3c-kube-api-access-ckr6s\") pod \"dnsmasq-dns-69655fd4bf-xltwv\" (UID: \"06549e5c-262d-45b3-9790-7c264bcecf3c\") " pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.458759 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.502524 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data-custom\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.502568 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-etc-machine-id\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.502582 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.502616 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.502634 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8lfv\" (UniqueName: \"kubernetes.io/projected/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-kube-api-access-r8lfv\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.502724 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-logs\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.502967 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-scripts\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.525163 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.605940 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data-custom\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.605986 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-etc-machine-id\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.606010 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.606058 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.606075 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8lfv\" (UniqueName: \"kubernetes.io/projected/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-kube-api-access-r8lfv\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.606140 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-logs\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.606210 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-scripts\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.609735 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-etc-machine-id\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.611371 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-logs\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: 
I1121 14:46:23.613390 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data-custom\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.621418 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.624282 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.625447 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-scripts\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.630220 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8lfv\" (UniqueName: \"kubernetes.io/projected/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-kube-api-access-r8lfv\") pod \"manila-api-0\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " pod="openstack/manila-api-0" Nov 21 14:46:23 crc kubenswrapper[5133]: I1121 14:46:23.710837 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.017620 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.194669 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 21 14:46:24 crc kubenswrapper[5133]: W1121 14:46:24.196754 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd68c59a3_abc6_4cdd_bdfa_f3eac726fa81.slice/crio-8a65168923f92843875ad4b5571ba383588666664c3deaeacb81ed586418d6f1 WatchSource:0}: Error finding container 8a65168923f92843875ad4b5571ba383588666664c3deaeacb81ed586418d6f1: Status 404 returned error can't find the container with id 8a65168923f92843875ad4b5571ba383588666664c3deaeacb81ed586418d6f1 Nov 21 14:46:24 crc kubenswrapper[5133]: W1121 14:46:24.261791 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06549e5c_262d_45b3_9790_7c264bcecf3c.slice/crio-0ebd868d111172921da1da8eef8a3d2fe093c2a01fa30a26d61d8e24ba20c66e WatchSource:0}: Error finding container 0ebd868d111172921da1da8eef8a3d2fe093c2a01fa30a26d61d8e24ba20c66e: Status 404 returned error can't find the container with id 0ebd868d111172921da1da8eef8a3d2fe093c2a01fa30a26d61d8e24ba20c66e Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.262993 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-69655fd4bf-xltwv"] Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.457939 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:46:24 crc kubenswrapper[5133]: E1121 14:46:24.458468 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:46:24 crc kubenswrapper[5133]: W1121 14:46:24.475685 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5f5d9853_36e3_4aa0_b97e_4f5e3de627c1.slice/crio-b9e38575400872f9d4f678cf5ff597d7feb01e72f1e2d8df756072017dd4ea9a WatchSource:0}: Error finding container b9e38575400872f9d4f678cf5ff597d7feb01e72f1e2d8df756072017dd4ea9a: Status 404 returned error can't find the container with id b9e38575400872f9d4f678cf5ff597d7feb01e72f1e2d8df756072017dd4ea9a Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.477372 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.739137 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"25cbce04-5851-4568-b3f3-fb5eefbd4b7f","Type":"ContainerStarted","Data":"5d54fddd84d9fa9b39866a8dc43296bf8ab7207c83204363de7632c6e3383045"} Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.742346 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" 
event={"ID":"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1","Type":"ContainerStarted","Data":"b9e38575400872f9d4f678cf5ff597d7feb01e72f1e2d8df756072017dd4ea9a"} Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.745656 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" event={"ID":"06549e5c-262d-45b3-9790-7c264bcecf3c","Type":"ContainerStarted","Data":"75050bd79457e77a390441c17ed35ec106a7a3d71416e4b0608899f1d9063df9"} Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.745688 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" event={"ID":"06549e5c-262d-45b3-9790-7c264bcecf3c","Type":"ContainerStarted","Data":"0ebd868d111172921da1da8eef8a3d2fe093c2a01fa30a26d61d8e24ba20c66e"} Nov 21 14:46:24 crc kubenswrapper[5133]: I1121 14:46:24.751434 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81","Type":"ContainerStarted","Data":"8a65168923f92843875ad4b5571ba383588666664c3deaeacb81ed586418d6f1"} Nov 21 14:46:25 crc kubenswrapper[5133]: I1121 14:46:25.761678 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1","Type":"ContainerStarted","Data":"289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130"} Nov 21 14:46:25 crc kubenswrapper[5133]: I1121 14:46:25.764707 5133 generic.go:334] "Generic (PLEG): container finished" podID="06549e5c-262d-45b3-9790-7c264bcecf3c" containerID="75050bd79457e77a390441c17ed35ec106a7a3d71416e4b0608899f1d9063df9" exitCode=0 Nov 21 14:46:25 crc kubenswrapper[5133]: I1121 14:46:25.764752 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" event={"ID":"06549e5c-262d-45b3-9790-7c264bcecf3c","Type":"ContainerDied","Data":"75050bd79457e77a390441c17ed35ec106a7a3d71416e4b0608899f1d9063df9"} Nov 21 14:46:26 crc kubenswrapper[5133]: I1121 14:46:26.145920 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 21 14:46:26 crc kubenswrapper[5133]: I1121 14:46:26.777457 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1","Type":"ContainerStarted","Data":"38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d"} Nov 21 14:46:26 crc kubenswrapper[5133]: I1121 14:46:26.779950 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" event={"ID":"06549e5c-262d-45b3-9790-7c264bcecf3c","Type":"ContainerStarted","Data":"1bee723bdd93bb3ee2e8796186e08fab11a6ee8293d1c8ca77eb217c5e98fa8c"} Nov 21 14:46:27 crc kubenswrapper[5133]: I1121 14:46:27.789346 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 21 14:46:27 crc kubenswrapper[5133]: I1121 14:46:27.790184 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:27 crc kubenswrapper[5133]: I1121 14:46:27.789586 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-api-0" podUID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerName="manila-api-log" containerID="cri-o://289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130" gracePeriod=30 Nov 21 14:46:27 crc kubenswrapper[5133]: I1121 14:46:27.789428 5133 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/manila-api-0" podUID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerName="manila-api" containerID="cri-o://38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d" gracePeriod=30 Nov 21 14:46:27 crc kubenswrapper[5133]: I1121 14:46:27.814687 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=4.814666157 podStartE2EDuration="4.814666157s" podCreationTimestamp="2025-11-21 14:46:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:46:27.812103639 +0000 UTC m=+3847.609935887" watchObservedRunningTime="2025-11-21 14:46:27.814666157 +0000 UTC m=+3847.612498405" Nov 21 14:46:27 crc kubenswrapper[5133]: I1121 14:46:27.832692 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" podStartSLOduration=4.832674744 podStartE2EDuration="4.832674744s" podCreationTimestamp="2025-11-21 14:46:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:46:27.829074739 +0000 UTC m=+3847.626906987" watchObservedRunningTime="2025-11-21 14:46:27.832674744 +0000 UTC m=+3847.630507002" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.711326 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.718215 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-scripts\") pod \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.718260 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data\") pod \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.718305 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-logs\") pod \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.718390 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8lfv\" (UniqueName: \"kubernetes.io/projected/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-kube-api-access-r8lfv\") pod \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.718432 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-etc-machine-id\") pod \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.718517 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-combined-ca-bundle\") pod \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\" (UID: 
\"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.718552 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data-custom\") pod \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\" (UID: \"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1\") " Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.718733 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" (UID: "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.719054 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-logs" (OuterVolumeSpecName: "logs") pod "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" (UID: "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.719570 5133 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-logs\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.719596 5133 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.800316 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" (UID: "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.800429 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-scripts" (OuterVolumeSpecName: "scripts") pod "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" (UID: "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.801918 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-kube-api-access-r8lfv" (OuterVolumeSpecName: "kube-api-access-r8lfv") pod "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" (UID: "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1"). InnerVolumeSpecName "kube-api-access-r8lfv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.805716 5133 generic.go:334] "Generic (PLEG): container finished" podID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerID="38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d" exitCode=0 Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.805746 5133 generic.go:334] "Generic (PLEG): container finished" podID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerID="289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130" exitCode=143 Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.805876 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.805931 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1","Type":"ContainerDied","Data":"38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d"} Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.805972 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1","Type":"ContainerDied","Data":"289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130"} Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.805985 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"5f5d9853-36e3-4aa0-b97e-4f5e3de627c1","Type":"ContainerDied","Data":"b9e38575400872f9d4f678cf5ff597d7feb01e72f1e2d8df756072017dd4ea9a"} Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.806449 5133 scope.go:117] "RemoveContainer" containerID="38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.812592 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" (UID: "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.821802 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8lfv\" (UniqueName: \"kubernetes.io/projected/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-kube-api-access-r8lfv\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.821876 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.821891 5133 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.821904 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.832187 5133 scope.go:117] "RemoveContainer" containerID="289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.844831 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data" (OuterVolumeSpecName: "config-data") pod "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" (UID: "5f5d9853-36e3-4aa0-b97e-4f5e3de627c1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.854405 5133 scope.go:117] "RemoveContainer" containerID="38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d" Nov 21 14:46:28 crc kubenswrapper[5133]: E1121 14:46:28.854900 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d\": container with ID starting with 38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d not found: ID does not exist" containerID="38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.854946 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d"} err="failed to get container status \"38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d\": rpc error: code = NotFound desc = could not find container \"38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d\": container with ID starting with 38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d not found: ID does not exist" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.854972 5133 scope.go:117] "RemoveContainer" containerID="289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130" Nov 21 14:46:28 crc kubenswrapper[5133]: E1121 14:46:28.855438 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130\": container with ID starting with 289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130 not found: ID does not exist" 
containerID="289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.855469 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130"} err="failed to get container status \"289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130\": rpc error: code = NotFound desc = could not find container \"289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130\": container with ID starting with 289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130 not found: ID does not exist" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.855487 5133 scope.go:117] "RemoveContainer" containerID="38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.855896 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d"} err="failed to get container status \"38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d\": rpc error: code = NotFound desc = could not find container \"38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d\": container with ID starting with 38a1b134fed1d5cb470bab5af88aab4f85ec298fd97cafd33bb3f60068afc88d not found: ID does not exist" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.855923 5133 scope.go:117] "RemoveContainer" containerID="289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.856457 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130"} err="failed to get container status \"289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130\": rpc error: code = NotFound desc = could not find container \"289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130\": container with ID starting with 289f9cefc6cea77f19a4fd0e30e85640b30eae1da7a06dd82a6387475af47130 not found: ID does not exist" Nov 21 14:46:28 crc kubenswrapper[5133]: I1121 14:46:28.923271 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.242821 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-api-0"] Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.265152 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-api-0"] Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.286433 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"] Nov 21 14:46:29 crc kubenswrapper[5133]: E1121 14:46:29.286951 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerName="manila-api-log" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.286968 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerName="manila-api-log" Nov 21 14:46:29 crc kubenswrapper[5133]: E1121 14:46:29.287039 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerName="manila-api" Nov 21 14:46:29 crc kubenswrapper[5133]: 
I1121 14:46:29.287047 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerName="manila-api" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.287254 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerName="manila-api" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.287276 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" containerName="manila-api-log" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.288368 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.303797 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.309560 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-public-svc" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.309752 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-manila-internal-svc" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.309836 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.314268 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.314640 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="ceilometer-central-agent" containerID="cri-o://d95c9f7aecd2f6f2e77f4ac97e538d655ac6060ec10fc8d6debef215c7b39153" gracePeriod=30 Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.314808 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="proxy-httpd" containerID="cri-o://6850ac4ba9ac559104d16b42b27615df5159483ea6e1c699bc033aec509a7794" gracePeriod=30 Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.314867 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="sg-core" containerID="cri-o://3233b4e6edd00394373bbcbbe808a291cdd2e5077ae6b17bc26bed14d1a8616c" gracePeriod=30 Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.314912 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="ceilometer-notification-agent" containerID="cri-o://e2e2f5cc83cb84ec3f7586dd5528c19097c987692643ab0877baa98e798b2010" gracePeriod=30 Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.354430 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-config-data-custom\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.354498 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.354541 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-config-data\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.354559 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-scripts\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.354604 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-internal-tls-certs\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.354770 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-public-tls-certs\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.354905 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/087e2ee4-bb11-426d-b624-2240837ee732-logs\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.354983 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/087e2ee4-bb11-426d-b624-2240837ee732-etc-machine-id\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.355081 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8w6l\" (UniqueName: \"kubernetes.io/projected/087e2ee4-bb11-426d-b624-2240837ee732-kube-api-access-w8w6l\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.456829 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-config-data\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.457166 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-scripts\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.457434 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-internal-tls-certs\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.457572 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-public-tls-certs\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.457731 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/087e2ee4-bb11-426d-b624-2240837ee732-logs\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.457886 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/087e2ee4-bb11-426d-b624-2240837ee732-etc-machine-id\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.458032 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8w6l\" (UniqueName: \"kubernetes.io/projected/087e2ee4-bb11-426d-b624-2240837ee732-kube-api-access-w8w6l\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.458143 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/087e2ee4-bb11-426d-b624-2240837ee732-logs\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.457937 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/087e2ee4-bb11-426d-b624-2240837ee732-etc-machine-id\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.458449 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-config-data-custom\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.458517 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.464671 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.465618 5133 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-config-data-custom\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.466340 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-internal-tls-certs\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.468428 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-scripts\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.474428 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-public-tls-certs\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.478023 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/087e2ee4-bb11-426d-b624-2240837ee732-config-data\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.481643 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8w6l\" (UniqueName: \"kubernetes.io/projected/087e2ee4-bb11-426d-b624-2240837ee732-kube-api-access-w8w6l\") pod \"manila-api-0\" (UID: \"087e2ee4-bb11-426d-b624-2240837ee732\") " pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.626317 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-api-0" Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.816631 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"25cbce04-5851-4568-b3f3-fb5eefbd4b7f","Type":"ContainerStarted","Data":"49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5"} Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.821989 5133 generic.go:334] "Generic (PLEG): container finished" podID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerID="6850ac4ba9ac559104d16b42b27615df5159483ea6e1c699bc033aec509a7794" exitCode=0 Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.822040 5133 generic.go:334] "Generic (PLEG): container finished" podID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerID="3233b4e6edd00394373bbcbbe808a291cdd2e5077ae6b17bc26bed14d1a8616c" exitCode=2 Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.822067 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerDied","Data":"6850ac4ba9ac559104d16b42b27615df5159483ea6e1c699bc033aec509a7794"} Nov 21 14:46:29 crc kubenswrapper[5133]: I1121 14:46:29.822098 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerDied","Data":"3233b4e6edd00394373bbcbbe808a291cdd2e5077ae6b17bc26bed14d1a8616c"} Nov 21 14:46:30 crc kubenswrapper[5133]: I1121 14:46:30.472398 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f5d9853-36e3-4aa0-b97e-4f5e3de627c1" path="/var/lib/kubelet/pods/5f5d9853-36e3-4aa0-b97e-4f5e3de627c1/volumes" Nov 21 14:46:30 crc kubenswrapper[5133]: I1121 14:46:30.533084 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"] Nov 21 14:46:30 crc kubenswrapper[5133]: W1121 14:46:30.545185 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod087e2ee4_bb11_426d_b624_2240837ee732.slice/crio-c4b7721537a2812bbc0500c9172ae862fa475ff4b64fcb7e1bac5d2f169bd806 WatchSource:0}: Error finding container c4b7721537a2812bbc0500c9172ae862fa475ff4b64fcb7e1bac5d2f169bd806: Status 404 returned error can't find the container with id c4b7721537a2812bbc0500c9172ae862fa475ff4b64fcb7e1bac5d2f169bd806 Nov 21 14:46:30 crc kubenswrapper[5133]: I1121 14:46:30.841802 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"087e2ee4-bb11-426d-b624-2240837ee732","Type":"ContainerStarted","Data":"c4b7721537a2812bbc0500c9172ae862fa475ff4b64fcb7e1bac5d2f169bd806"} Nov 21 14:46:30 crc kubenswrapper[5133]: I1121 14:46:30.846239 5133 generic.go:334] "Generic (PLEG): container finished" podID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerID="e2e2f5cc83cb84ec3f7586dd5528c19097c987692643ab0877baa98e798b2010" exitCode=0 Nov 21 14:46:30 crc kubenswrapper[5133]: I1121 14:46:30.846695 5133 generic.go:334] "Generic (PLEG): container finished" podID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerID="d95c9f7aecd2f6f2e77f4ac97e538d655ac6060ec10fc8d6debef215c7b39153" exitCode=0 Nov 21 14:46:30 crc kubenswrapper[5133]: I1121 14:46:30.846737 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerDied","Data":"e2e2f5cc83cb84ec3f7586dd5528c19097c987692643ab0877baa98e798b2010"} Nov 21 14:46:30 crc kubenswrapper[5133]: 
I1121 14:46:30.846770 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerDied","Data":"d95c9f7aecd2f6f2e77f4ac97e538d655ac6060ec10fc8d6debef215c7b39153"} Nov 21 14:46:31 crc kubenswrapper[5133]: I1121 14:46:31.855966 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"25cbce04-5851-4568-b3f3-fb5eefbd4b7f","Type":"ContainerStarted","Data":"63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4"} Nov 21 14:46:32 crc kubenswrapper[5133]: I1121 14:46:32.865642 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"087e2ee4-bb11-426d-b624-2240837ee732","Type":"ContainerStarted","Data":"4b578faab62125ec42071d19114cf1b75a8a722dc6f6b085667f322c057ac48f"} Nov 21 14:46:32 crc kubenswrapper[5133]: I1121 14:46:32.897210 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=6.034430802 podStartE2EDuration="9.897183031s" podCreationTimestamp="2025-11-21 14:46:23 +0000 UTC" firstStartedPulling="2025-11-21 14:46:24.027920142 +0000 UTC m=+3843.825752390" lastFinishedPulling="2025-11-21 14:46:27.890672361 +0000 UTC m=+3847.688504619" observedRunningTime="2025-11-21 14:46:32.889260481 +0000 UTC m=+3852.687092729" watchObservedRunningTime="2025-11-21 14:46:32.897183031 +0000 UTC m=+3852.695015289" Nov 21 14:46:33 crc kubenswrapper[5133]: I1121 14:46:33.390112 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 21 14:46:33 crc kubenswrapper[5133]: I1121 14:46:33.527196 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-69655fd4bf-xltwv" Nov 21 14:46:33 crc kubenswrapper[5133]: I1121 14:46:33.592115 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-dcjpl"] Nov 21 14:46:33 crc kubenswrapper[5133]: I1121 14:46:33.592833 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" podUID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" containerName="dnsmasq-dns" containerID="cri-o://54c3b38ae97cca8f97c9c0637bb132924af3ffc49699b75bf620afbf5c0b5b21" gracePeriod=10 Nov 21 14:46:34 crc kubenswrapper[5133]: I1121 14:46:34.888483 5133 generic.go:334] "Generic (PLEG): container finished" podID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" containerID="54c3b38ae97cca8f97c9c0637bb132924af3ffc49699b75bf620afbf5c0b5b21" exitCode=0 Nov 21 14:46:34 crc kubenswrapper[5133]: I1121 14:46:34.888533 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" event={"ID":"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745","Type":"ContainerDied","Data":"54c3b38ae97cca8f97c9c0637bb132924af3ffc49699b75bf620afbf5c0b5b21"} Nov 21 14:46:34 crc kubenswrapper[5133]: I1121 14:46:34.903557 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" podUID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.190:5353: connect: connection refused" Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.844811 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.918848 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dfdce579-9d1a-4826-9d21-bf9061ab6c01","Type":"ContainerDied","Data":"c93cf46efdaa66a4a4fdd17f0ba941c4ad7a6b4c855bc1ab66decc4e74080ead"} Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.919144 5133 scope.go:117] "RemoveContainer" containerID="6850ac4ba9ac559104d16b42b27615df5159483ea6e1c699bc033aec509a7794" Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.918912 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.981451 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-scripts\") pod \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.981612 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-sg-core-conf-yaml\") pod \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.981753 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-combined-ca-bundle\") pod \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.981792 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thdh5\" (UniqueName: \"kubernetes.io/projected/dfdce579-9d1a-4826-9d21-bf9061ab6c01-kube-api-access-thdh5\") pod \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.981836 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-config-data\") pod \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.981868 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-run-httpd\") pod \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.981889 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-log-httpd\") pod \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.981976 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-ceilometer-tls-certs\") pod \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\" (UID: \"dfdce579-9d1a-4826-9d21-bf9061ab6c01\") " Nov 21 14:46:35 crc 
kubenswrapper[5133]: I1121 14:46:35.982269 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "dfdce579-9d1a-4826-9d21-bf9061ab6c01" (UID: "dfdce579-9d1a-4826-9d21-bf9061ab6c01"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.982592 5133 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.983052 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "dfdce579-9d1a-4826-9d21-bf9061ab6c01" (UID: "dfdce579-9d1a-4826-9d21-bf9061ab6c01"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:46:35 crc kubenswrapper[5133]: I1121 14:46:35.999149 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-scripts" (OuterVolumeSpecName: "scripts") pod "dfdce579-9d1a-4826-9d21-bf9061ab6c01" (UID: "dfdce579-9d1a-4826-9d21-bf9061ab6c01"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.015661 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfdce579-9d1a-4826-9d21-bf9061ab6c01-kube-api-access-thdh5" (OuterVolumeSpecName: "kube-api-access-thdh5") pod "dfdce579-9d1a-4826-9d21-bf9061ab6c01" (UID: "dfdce579-9d1a-4826-9d21-bf9061ab6c01"). InnerVolumeSpecName "kube-api-access-thdh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.080848 5133 scope.go:117] "RemoveContainer" containerID="3233b4e6edd00394373bbcbbe808a291cdd2e5077ae6b17bc26bed14d1a8616c" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.085526 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.085543 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thdh5\" (UniqueName: \"kubernetes.io/projected/dfdce579-9d1a-4826-9d21-bf9061ab6c01-kube-api-access-thdh5\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.085552 5133 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dfdce579-9d1a-4826-9d21-bf9061ab6c01-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.114132 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dfdce579-9d1a-4826-9d21-bf9061ab6c01" (UID: "dfdce579-9d1a-4826-9d21-bf9061ab6c01"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.141606 5133 scope.go:117] "RemoveContainer" containerID="e2e2f5cc83cb84ec3f7586dd5528c19097c987692643ab0877baa98e798b2010" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.201518 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.210845 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "dfdce579-9d1a-4826-9d21-bf9061ab6c01" (UID: "dfdce579-9d1a-4826-9d21-bf9061ab6c01"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.224982 5133 scope.go:117] "RemoveContainer" containerID="d95c9f7aecd2f6f2e77f4ac97e538d655ac6060ec10fc8d6debef215c7b39153" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.242124 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "dfdce579-9d1a-4826-9d21-bf9061ab6c01" (UID: "dfdce579-9d1a-4826-9d21-bf9061ab6c01"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.258212 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-config-data" (OuterVolumeSpecName: "config-data") pod "dfdce579-9d1a-4826-9d21-bf9061ab6c01" (UID: "dfdce579-9d1a-4826-9d21-bf9061ab6c01"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.263582 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.304339 5133 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.304722 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.304736 5133 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/dfdce579-9d1a-4826-9d21-bf9061ab6c01-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.406414 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-openstack-edpm-ipam\") pod \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.406569 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-sb\") pod \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.406625 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-nb\") pod \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.406718 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-dns-svc\") pod \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.406752 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-config\") pod \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.406787 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59pqc\" (UniqueName: \"kubernetes.io/projected/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-kube-api-access-59pqc\") pod \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\" (UID: \"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745\") " Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.419377 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-kube-api-access-59pqc" (OuterVolumeSpecName: "kube-api-access-59pqc") pod "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" (UID: "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745"). InnerVolumeSpecName "kube-api-access-59pqc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.459443 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:46:36 crc kubenswrapper[5133]: E1121 14:46:36.459817 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.484736 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" (UID: "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.486682 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-config" (OuterVolumeSpecName: "config") pod "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" (UID: "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.494145 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" (UID: "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.494452 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" (UID: "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.510046 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.510081 5133 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.510092 5133 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-config\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.510103 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59pqc\" (UniqueName: \"kubernetes.io/projected/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-kube-api-access-59pqc\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.510122 5133 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.510699 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" (UID: "74c8b0ac-4ff7-4fea-bff7-7a6d9356e745"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.558545 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.580397 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.601089 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:36 crc kubenswrapper[5133]: E1121 14:46:36.601720 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" containerName="dnsmasq-dns" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.601806 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" containerName="dnsmasq-dns" Nov 21 14:46:36 crc kubenswrapper[5133]: E1121 14:46:36.601872 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="proxy-httpd" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.601956 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="proxy-httpd" Nov 21 14:46:36 crc kubenswrapper[5133]: E1121 14:46:36.602069 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" containerName="init" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.602144 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" containerName="init" Nov 21 14:46:36 crc kubenswrapper[5133]: E1121 14:46:36.602214 5133 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="ceilometer-notification-agent" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.602268 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="ceilometer-notification-agent" Nov 21 14:46:36 crc kubenswrapper[5133]: E1121 14:46:36.602331 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="ceilometer-central-agent" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.602390 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="ceilometer-central-agent" Nov 21 14:46:36 crc kubenswrapper[5133]: E1121 14:46:36.602447 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="sg-core" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.602503 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="sg-core" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.602750 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="proxy-httpd" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.602842 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="ceilometer-notification-agent" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.602924 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" containerName="dnsmasq-dns" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.603032 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="ceilometer-central-agent" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.603125 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" containerName="sg-core" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.605367 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.612500 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.612801 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.612961 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.614717 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.614905 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-scripts\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.615041 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-config-data\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.615204 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xnb8\" (UniqueName: \"kubernetes.io/projected/c02422b4-72e1-4f4d-8efe-b4460618f820-kube-api-access-7xnb8\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.615318 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.615455 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.615553 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-log-httpd\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.615734 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-run-httpd\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 
21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.615890 5133 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.640059 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.717770 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-run-httpd\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.717924 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.718037 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-scripts\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.718078 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-config-data\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.718128 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xnb8\" (UniqueName: \"kubernetes.io/projected/c02422b4-72e1-4f4d-8efe-b4460618f820-kube-api-access-7xnb8\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.718229 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.718320 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.718353 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-log-httpd\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.718870 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-log-httpd\") pod \"ceilometer-0\" (UID: 
\"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.720522 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-run-httpd\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.727319 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.735832 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.736367 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.736467 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-scripts\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.739656 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-config-data\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.742896 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xnb8\" (UniqueName: \"kubernetes.io/projected/c02422b4-72e1-4f4d-8efe-b4460618f820-kube-api-access-7xnb8\") pod \"ceilometer-0\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.949441 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.976320 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"087e2ee4-bb11-426d-b624-2240837ee732","Type":"ContainerStarted","Data":"76c4c027f805acc2647f1d92433df48a3d4f0e0f75728adc79a128f8eb19c649"} Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.977723 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.981758 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" event={"ID":"74c8b0ac-4ff7-4fea-bff7-7a6d9356e745","Type":"ContainerDied","Data":"e2df6725baf5124bf138a6961f071e82e4fea6ee8aeb6b0c0c82dfa1a0b5e7af"} Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.981797 5133 scope.go:117] "RemoveContainer" containerID="54c3b38ae97cca8f97c9c0637bb132924af3ffc49699b75bf620afbf5c0b5b21" Nov 21 14:46:36 crc kubenswrapper[5133]: I1121 14:46:36.981880 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fbc59fbb7-dcjpl" Nov 21 14:46:37 crc kubenswrapper[5133]: I1121 14:46:37.015938 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=8.015919724 podStartE2EDuration="8.015919724s" podCreationTimestamp="2025-11-21 14:46:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:46:37.003464024 +0000 UTC m=+3856.801296272" watchObservedRunningTime="2025-11-21 14:46:37.015919724 +0000 UTC m=+3856.813751972" Nov 21 14:46:37 crc kubenswrapper[5133]: I1121 14:46:37.126956 5133 scope.go:117] "RemoveContainer" containerID="5cb12f57c7d673c89569bac68aa0f81e6b315eabf894678b0750d4b0bdc3233e" Nov 21 14:46:37 crc kubenswrapper[5133]: I1121 14:46:37.133314 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-dcjpl"] Nov 21 14:46:37 crc kubenswrapper[5133]: I1121 14:46:37.150428 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fbc59fbb7-dcjpl"] Nov 21 14:46:37 crc kubenswrapper[5133]: I1121 14:46:37.481584 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:37 crc kubenswrapper[5133]: W1121 14:46:37.487702 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc02422b4_72e1_4f4d_8efe_b4460618f820.slice/crio-8689e89cd14325c211d3076d5206d2ac8594d10e49cacf10c0f033dda525099d WatchSource:0}: Error finding container 8689e89cd14325c211d3076d5206d2ac8594d10e49cacf10c0f033dda525099d: Status 404 returned error can't find the container with id 8689e89cd14325c211d3076d5206d2ac8594d10e49cacf10c0f033dda525099d Nov 21 14:46:37 crc kubenswrapper[5133]: I1121 14:46:37.628393 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:37 crc kubenswrapper[5133]: I1121 14:46:37.992649 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerStarted","Data":"8689e89cd14325c211d3076d5206d2ac8594d10e49cacf10c0f033dda525099d"} Nov 21 14:46:37 crc kubenswrapper[5133]: I1121 14:46:37.994484 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" 
event={"ID":"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81","Type":"ContainerStarted","Data":"9d1884d4fd74d01dad9ad4112c918cdbf473493041982b768cf400fcf954e9f8"} Nov 21 14:46:38 crc kubenswrapper[5133]: I1121 14:46:38.484611 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74c8b0ac-4ff7-4fea-bff7-7a6d9356e745" path="/var/lib/kubelet/pods/74c8b0ac-4ff7-4fea-bff7-7a6d9356e745/volumes" Nov 21 14:46:38 crc kubenswrapper[5133]: I1121 14:46:38.485866 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfdce579-9d1a-4826-9d21-bf9061ab6c01" path="/var/lib/kubelet/pods/dfdce579-9d1a-4826-9d21-bf9061ab6c01/volumes" Nov 21 14:46:39 crc kubenswrapper[5133]: I1121 14:46:39.008127 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81","Type":"ContainerStarted","Data":"89dc4267dc5a9be3c23a797689f0c842e0f70bca5e16c8f6fc959353e37f64da"} Nov 21 14:46:39 crc kubenswrapper[5133]: I1121 14:46:39.031221 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.994255235 podStartE2EDuration="16.031168081s" podCreationTimestamp="2025-11-21 14:46:23 +0000 UTC" firstStartedPulling="2025-11-21 14:46:24.20177491 +0000 UTC m=+3843.999607158" lastFinishedPulling="2025-11-21 14:46:36.238687756 +0000 UTC m=+3856.036520004" observedRunningTime="2025-11-21 14:46:39.025309395 +0000 UTC m=+3858.823141653" watchObservedRunningTime="2025-11-21 14:46:39.031168081 +0000 UTC m=+3858.829000329" Nov 21 14:46:40 crc kubenswrapper[5133]: I1121 14:46:40.017541 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerStarted","Data":"8294d27b58b5a6cc9e1bdc48d4a72326d4546dabcdb4a0ef6fc8e689dde5f15c"} Nov 21 14:46:41 crc kubenswrapper[5133]: I1121 14:46:41.034093 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerStarted","Data":"ba6a7311d8acebe79a8dd0e8708f6631c6a82701feac1f716edf35f8c036ee2a"} Nov 21 14:46:43 crc kubenswrapper[5133]: I1121 14:46:43.059145 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerStarted","Data":"7b0e7d8074144025aeab989046f293fe1256fec2cbb03d5cb044ccb2dd5e1c13"} Nov 21 14:46:43 crc kubenswrapper[5133]: I1121 14:46:43.460164 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 21 14:46:45 crc kubenswrapper[5133]: I1121 14:46:45.120286 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 21 14:46:45 crc kubenswrapper[5133]: I1121 14:46:45.201737 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 21 14:46:48 crc kubenswrapper[5133]: I1121 14:46:46.087494 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" containerName="manila-scheduler" containerID="cri-o://49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5" gracePeriod=30 Nov 21 14:46:48 crc kubenswrapper[5133]: I1121 14:46:46.087582 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-scheduler-0" podUID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" 
containerName="probe" containerID="cri-o://63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4" gracePeriod=30 Nov 21 14:46:48 crc kubenswrapper[5133]: I1121 14:46:48.130654 5133 generic.go:334] "Generic (PLEG): container finished" podID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" containerID="63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4" exitCode=0 Nov 21 14:46:48 crc kubenswrapper[5133]: I1121 14:46:48.130716 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"25cbce04-5851-4568-b3f3-fb5eefbd4b7f","Type":"ContainerDied","Data":"63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4"} Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.121879 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.143987 5133 generic.go:334] "Generic (PLEG): container finished" podID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" containerID="49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5" exitCode=0 Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.144315 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.144331 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"25cbce04-5851-4568-b3f3-fb5eefbd4b7f","Type":"ContainerDied","Data":"49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5"} Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.145188 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"25cbce04-5851-4568-b3f3-fb5eefbd4b7f","Type":"ContainerDied","Data":"5d54fddd84d9fa9b39866a8dc43296bf8ab7207c83204363de7632c6e3383045"} Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.145223 5133 scope.go:117] "RemoveContainer" containerID="63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.170784 5133 scope.go:117] "RemoveContainer" containerID="49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.213967 5133 scope.go:117] "RemoveContainer" containerID="63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4" Nov 21 14:46:49 crc kubenswrapper[5133]: E1121 14:46:49.214884 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4\": container with ID starting with 63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4 not found: ID does not exist" containerID="63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.215017 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4"} err="failed to get container status \"63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4\": rpc error: code = NotFound desc = could not find container \"63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4\": container with ID starting with 63389e8539fe33a8bc102236e3111eadb356ac37452308b9ce1f34eff75b8fd4 not found: ID does not exist" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 
14:46:49.215165 5133 scope.go:117] "RemoveContainer" containerID="49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5" Nov 21 14:46:49 crc kubenswrapper[5133]: E1121 14:46:49.215626 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5\": container with ID starting with 49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5 not found: ID does not exist" containerID="49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.215710 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5"} err="failed to get container status \"49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5\": rpc error: code = NotFound desc = could not find container \"49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5\": container with ID starting with 49ec7803a188c49e22750d57be3fe55e46ebe69e20ab689d34554888314cb8f5 not found: ID does not exist" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.275625 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-combined-ca-bundle\") pod \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.276068 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdg9t\" (UniqueName: \"kubernetes.io/projected/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-kube-api-access-fdg9t\") pod \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.276174 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-scripts\") pod \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.276321 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-etc-machine-id\") pod \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.276421 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data\") pod \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.276492 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "25cbce04-5851-4568-b3f3-fb5eefbd4b7f" (UID: "25cbce04-5851-4568-b3f3-fb5eefbd4b7f"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.276672 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data-custom\") pod \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\" (UID: \"25cbce04-5851-4568-b3f3-fb5eefbd4b7f\") " Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.277153 5133 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.280551 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-scripts" (OuterVolumeSpecName: "scripts") pod "25cbce04-5851-4568-b3f3-fb5eefbd4b7f" (UID: "25cbce04-5851-4568-b3f3-fb5eefbd4b7f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.281225 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "25cbce04-5851-4568-b3f3-fb5eefbd4b7f" (UID: "25cbce04-5851-4568-b3f3-fb5eefbd4b7f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.282053 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-kube-api-access-fdg9t" (OuterVolumeSpecName: "kube-api-access-fdg9t") pod "25cbce04-5851-4568-b3f3-fb5eefbd4b7f" (UID: "25cbce04-5851-4568-b3f3-fb5eefbd4b7f"). InnerVolumeSpecName "kube-api-access-fdg9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.351211 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "25cbce04-5851-4568-b3f3-fb5eefbd4b7f" (UID: "25cbce04-5851-4568-b3f3-fb5eefbd4b7f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.379848 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.379901 5133 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.379925 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.379944 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdg9t\" (UniqueName: \"kubernetes.io/projected/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-kube-api-access-fdg9t\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.391423 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data" (OuterVolumeSpecName: "config-data") pod "25cbce04-5851-4568-b3f3-fb5eefbd4b7f" (UID: "25cbce04-5851-4568-b3f3-fb5eefbd4b7f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.476884 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-scheduler-0"] Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.482672 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25cbce04-5851-4568-b3f3-fb5eefbd4b7f-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.484931 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-scheduler-0"] Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.496788 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"] Nov 21 14:46:49 crc kubenswrapper[5133]: E1121 14:46:49.497221 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" containerName="probe" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.497241 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" containerName="probe" Nov 21 14:46:49 crc kubenswrapper[5133]: E1121 14:46:49.497279 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" containerName="manila-scheduler" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.497286 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" containerName="manila-scheduler" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.497448 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" containerName="manila-scheduler" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.497470 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" containerName="probe" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.498364 5133 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.502068 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.506764 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.686142 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1306baba-7e9f-4f12-a43d-2c39cd5eb212-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.686805 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.686878 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.686934 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-config-data\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.687056 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f2db\" (UniqueName: \"kubernetes.io/projected/1306baba-7e9f-4f12-a43d-2c39cd5eb212-kube-api-access-6f2db\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.687095 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-scripts\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.788740 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1306baba-7e9f-4f12-a43d-2c39cd5eb212-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.788819 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.788877 5133 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.788913 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-config-data\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.788940 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1306baba-7e9f-4f12-a43d-2c39cd5eb212-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.788952 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f2db\" (UniqueName: \"kubernetes.io/projected/1306baba-7e9f-4f12-a43d-2c39cd5eb212-kube-api-access-6f2db\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.789094 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-scripts\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.793227 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-config-data\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.793529 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.794743 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-scripts\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.797487 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1306baba-7e9f-4f12-a43d-2c39cd5eb212-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.811134 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f2db\" (UniqueName: \"kubernetes.io/projected/1306baba-7e9f-4f12-a43d-2c39cd5eb212-kube-api-access-6f2db\") pod \"manila-scheduler-0\" (UID: \"1306baba-7e9f-4f12-a43d-2c39cd5eb212\") " 
pod="openstack/manila-scheduler-0" Nov 21 14:46:49 crc kubenswrapper[5133]: I1121 14:46:49.874673 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0" Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.158316 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerStarted","Data":"ba5c9692ff804c667d430b2d8f6ed471c5f43c707a50ac45e26f91417b9e20b3"} Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.159145 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.159480 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="ceilometer-central-agent" containerID="cri-o://8294d27b58b5a6cc9e1bdc48d4a72326d4546dabcdb4a0ef6fc8e689dde5f15c" gracePeriod=30 Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.159611 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="proxy-httpd" containerID="cri-o://ba5c9692ff804c667d430b2d8f6ed471c5f43c707a50ac45e26f91417b9e20b3" gracePeriod=30 Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.159667 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="sg-core" containerID="cri-o://7b0e7d8074144025aeab989046f293fe1256fec2cbb03d5cb044ccb2dd5e1c13" gracePeriod=30 Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.159714 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="ceilometer-notification-agent" containerID="cri-o://ba6a7311d8acebe79a8dd0e8708f6631c6a82701feac1f716edf35f8c036ee2a" gracePeriod=30 Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.189964 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.7193586979999997 podStartE2EDuration="14.189940856s" podCreationTimestamp="2025-11-21 14:46:36 +0000 UTC" firstStartedPulling="2025-11-21 14:46:37.490306336 +0000 UTC m=+3857.288138584" lastFinishedPulling="2025-11-21 14:46:48.960888484 +0000 UTC m=+3868.758720742" observedRunningTime="2025-11-21 14:46:50.182662023 +0000 UTC m=+3869.980494301" watchObservedRunningTime="2025-11-21 14:46:50.189940856 +0000 UTC m=+3869.987773114" Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.407149 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"] Nov 21 14:46:50 crc kubenswrapper[5133]: W1121 14:46:50.416451 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1306baba_7e9f_4f12_a43d_2c39cd5eb212.slice/crio-bbd63b6fa3fa07902bcf65367fded068446dfdb077c939008456c749dd221069 WatchSource:0}: Error finding container bbd63b6fa3fa07902bcf65367fded068446dfdb077c939008456c749dd221069: Status 404 returned error can't find the container with id bbd63b6fa3fa07902bcf65367fded068446dfdb077c939008456c749dd221069 Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.459733 5133 scope.go:117] "RemoveContainer" 
containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:46:50 crc kubenswrapper[5133]: E1121 14:46:50.460131 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:46:50 crc kubenswrapper[5133]: I1121 14:46:50.475411 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25cbce04-5851-4568-b3f3-fb5eefbd4b7f" path="/var/lib/kubelet/pods/25cbce04-5851-4568-b3f3-fb5eefbd4b7f/volumes" Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.172189 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"1306baba-7e9f-4f12-a43d-2c39cd5eb212","Type":"ContainerStarted","Data":"c3f0788d1f926441b6152f880bf2d44b9afcad082c0adfd1e683f80e0129e76e"} Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.172504 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"1306baba-7e9f-4f12-a43d-2c39cd5eb212","Type":"ContainerStarted","Data":"bbd63b6fa3fa07902bcf65367fded068446dfdb077c939008456c749dd221069"} Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.177976 5133 generic.go:334] "Generic (PLEG): container finished" podID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerID="ba5c9692ff804c667d430b2d8f6ed471c5f43c707a50ac45e26f91417b9e20b3" exitCode=0 Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.178031 5133 generic.go:334] "Generic (PLEG): container finished" podID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerID="7b0e7d8074144025aeab989046f293fe1256fec2cbb03d5cb044ccb2dd5e1c13" exitCode=2 Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.178040 5133 generic.go:334] "Generic (PLEG): container finished" podID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerID="ba6a7311d8acebe79a8dd0e8708f6631c6a82701feac1f716edf35f8c036ee2a" exitCode=0 Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.178048 5133 generic.go:334] "Generic (PLEG): container finished" podID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerID="8294d27b58b5a6cc9e1bdc48d4a72326d4546dabcdb4a0ef6fc8e689dde5f15c" exitCode=0 Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.178052 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerDied","Data":"ba5c9692ff804c667d430b2d8f6ed471c5f43c707a50ac45e26f91417b9e20b3"} Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.178113 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerDied","Data":"7b0e7d8074144025aeab989046f293fe1256fec2cbb03d5cb044ccb2dd5e1c13"} Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.178126 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerDied","Data":"ba6a7311d8acebe79a8dd0e8708f6631c6a82701feac1f716edf35f8c036ee2a"} Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.178141 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerDied","Data":"8294d27b58b5a6cc9e1bdc48d4a72326d4546dabcdb4a0ef6fc8e689dde5f15c"} Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.365460 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.826587 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.933414 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-log-httpd\") pod \"c02422b4-72e1-4f4d-8efe-b4460618f820\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.933472 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-config-data\") pod \"c02422b4-72e1-4f4d-8efe-b4460618f820\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.933493 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-sg-core-conf-yaml\") pod \"c02422b4-72e1-4f4d-8efe-b4460618f820\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.933521 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-combined-ca-bundle\") pod \"c02422b4-72e1-4f4d-8efe-b4460618f820\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.933587 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xnb8\" (UniqueName: \"kubernetes.io/projected/c02422b4-72e1-4f4d-8efe-b4460618f820-kube-api-access-7xnb8\") pod \"c02422b4-72e1-4f4d-8efe-b4460618f820\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.933714 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-ceilometer-tls-certs\") pod \"c02422b4-72e1-4f4d-8efe-b4460618f820\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.933787 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-scripts\") pod \"c02422b4-72e1-4f4d-8efe-b4460618f820\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.933823 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-run-httpd\") pod \"c02422b4-72e1-4f4d-8efe-b4460618f820\" (UID: \"c02422b4-72e1-4f4d-8efe-b4460618f820\") " Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.934940 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-run-httpd" (OuterVolumeSpecName: "run-httpd") 
pod "c02422b4-72e1-4f4d-8efe-b4460618f820" (UID: "c02422b4-72e1-4f4d-8efe-b4460618f820"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.941103 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c02422b4-72e1-4f4d-8efe-b4460618f820" (UID: "c02422b4-72e1-4f4d-8efe-b4460618f820"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.946205 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-scripts" (OuterVolumeSpecName: "scripts") pod "c02422b4-72e1-4f4d-8efe-b4460618f820" (UID: "c02422b4-72e1-4f4d-8efe-b4460618f820"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.955766 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c02422b4-72e1-4f4d-8efe-b4460618f820-kube-api-access-7xnb8" (OuterVolumeSpecName: "kube-api-access-7xnb8") pod "c02422b4-72e1-4f4d-8efe-b4460618f820" (UID: "c02422b4-72e1-4f4d-8efe-b4460618f820"). InnerVolumeSpecName "kube-api-access-7xnb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:46:51 crc kubenswrapper[5133]: I1121 14:46:51.996560 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c02422b4-72e1-4f4d-8efe-b4460618f820" (UID: "c02422b4-72e1-4f4d-8efe-b4460618f820"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.008488 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c02422b4-72e1-4f4d-8efe-b4460618f820" (UID: "c02422b4-72e1-4f4d-8efe-b4460618f820"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.022666 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c02422b4-72e1-4f4d-8efe-b4460618f820" (UID: "c02422b4-72e1-4f4d-8efe-b4460618f820"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.035704 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.035738 5133 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.035749 5133 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c02422b4-72e1-4f4d-8efe-b4460618f820-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.035758 5133 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.035769 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.035781 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xnb8\" (UniqueName: \"kubernetes.io/projected/c02422b4-72e1-4f4d-8efe-b4460618f820-kube-api-access-7xnb8\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.035791 5133 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.045368 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-config-data" (OuterVolumeSpecName: "config-data") pod "c02422b4-72e1-4f4d-8efe-b4460618f820" (UID: "c02422b4-72e1-4f4d-8efe-b4460618f820"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.137912 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c02422b4-72e1-4f4d-8efe-b4460618f820-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.191310 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c02422b4-72e1-4f4d-8efe-b4460618f820","Type":"ContainerDied","Data":"8689e89cd14325c211d3076d5206d2ac8594d10e49cacf10c0f033dda525099d"} Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.191370 5133 scope.go:117] "RemoveContainer" containerID="ba5c9692ff804c667d430b2d8f6ed471c5f43c707a50ac45e26f91417b9e20b3" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.192757 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.193288 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"1306baba-7e9f-4f12-a43d-2c39cd5eb212","Type":"ContainerStarted","Data":"c4bd547f148246e73e55f8cf55970f2a3d4e9ca694f29e6e2a8792a99a037217"} Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.218575 5133 scope.go:117] "RemoveContainer" containerID="7b0e7d8074144025aeab989046f293fe1256fec2cbb03d5cb044ccb2dd5e1c13" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.223821 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.223804587 podStartE2EDuration="3.223804587s" podCreationTimestamp="2025-11-21 14:46:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:46:52.216940535 +0000 UTC m=+3872.014772793" watchObservedRunningTime="2025-11-21 14:46:52.223804587 +0000 UTC m=+3872.021636835" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.253335 5133 scope.go:117] "RemoveContainer" containerID="ba6a7311d8acebe79a8dd0e8708f6631c6a82701feac1f716edf35f8c036ee2a" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.253351 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.267849 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.281180 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:52 crc kubenswrapper[5133]: E1121 14:46:52.281693 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="ceilometer-central-agent" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.281719 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="ceilometer-central-agent" Nov 21 14:46:52 crc kubenswrapper[5133]: E1121 14:46:52.281747 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="ceilometer-notification-agent" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.281757 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="ceilometer-notification-agent" Nov 21 14:46:52 crc kubenswrapper[5133]: E1121 14:46:52.281781 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="proxy-httpd" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.281790 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="proxy-httpd" Nov 21 14:46:52 crc kubenswrapper[5133]: E1121 14:46:52.281805 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="sg-core" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.281812 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="sg-core" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.282039 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="sg-core" Nov 21 14:46:52 crc 
kubenswrapper[5133]: I1121 14:46:52.282052 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="proxy-httpd" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.282065 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="ceilometer-central-agent" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.282086 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" containerName="ceilometer-notification-agent" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.284208 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.286846 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.287082 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.287569 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.292370 5133 scope.go:117] "RemoveContainer" containerID="8294d27b58b5a6cc9e1bdc48d4a72326d4546dabcdb4a0ef6fc8e689dde5f15c" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.307941 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.449727 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-config-data\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.449802 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.449824 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.449889 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.449913 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g965t\" (UniqueName: \"kubernetes.io/projected/404313b1-20dd-4623-ad80-ab9e2b521526-kube-api-access-g965t\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 
14:46:52.449975 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-scripts\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.449991 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/404313b1-20dd-4623-ad80-ab9e2b521526-run-httpd\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.450031 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/404313b1-20dd-4623-ad80-ab9e2b521526-log-httpd\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.475936 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c02422b4-72e1-4f4d-8efe-b4460618f820" path="/var/lib/kubelet/pods/c02422b4-72e1-4f4d-8efe-b4460618f820/volumes" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.551860 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-config-data\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.551948 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.551969 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.552016 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.552036 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g965t\" (UniqueName: \"kubernetes.io/projected/404313b1-20dd-4623-ad80-ab9e2b521526-kube-api-access-g965t\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.552097 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-scripts\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.552113 5133 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/404313b1-20dd-4623-ad80-ab9e2b521526-run-httpd\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.552135 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/404313b1-20dd-4623-ad80-ab9e2b521526-log-httpd\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.553033 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/404313b1-20dd-4623-ad80-ab9e2b521526-log-httpd\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.553151 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/404313b1-20dd-4623-ad80-ab9e2b521526-run-httpd\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.556699 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.557428 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-scripts\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.557742 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.559055 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.560415 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/404313b1-20dd-4623-ad80-ab9e2b521526-config-data\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.570228 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g965t\" (UniqueName: \"kubernetes.io/projected/404313b1-20dd-4623-ad80-ab9e2b521526-kube-api-access-g965t\") pod \"ceilometer-0\" (UID: \"404313b1-20dd-4623-ad80-ab9e2b521526\") " pod="openstack/ceilometer-0" Nov 21 14:46:52 crc kubenswrapper[5133]: I1121 14:46:52.607907 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 21 14:46:53 crc kubenswrapper[5133]: W1121 14:46:53.107353 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod404313b1_20dd_4623_ad80_ab9e2b521526.slice/crio-3c4ce7f7274ffa1063bea8ce06a2dc6f531e9d379a8d8016f01c4615924f7b0a WatchSource:0}: Error finding container 3c4ce7f7274ffa1063bea8ce06a2dc6f531e9d379a8d8016f01c4615924f7b0a: Status 404 returned error can't find the container with id 3c4ce7f7274ffa1063bea8ce06a2dc6f531e9d379a8d8016f01c4615924f7b0a Nov 21 14:46:53 crc kubenswrapper[5133]: I1121 14:46:53.110864 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 21 14:46:53 crc kubenswrapper[5133]: I1121 14:46:53.206356 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"404313b1-20dd-4623-ad80-ab9e2b521526","Type":"ContainerStarted","Data":"3c4ce7f7274ffa1063bea8ce06a2dc6f531e9d379a8d8016f01c4615924f7b0a"} Nov 21 14:46:54 crc kubenswrapper[5133]: I1121 14:46:54.220644 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"404313b1-20dd-4623-ad80-ab9e2b521526","Type":"ContainerStarted","Data":"5c20888c26e62ce81d102b766dd9812cc7fb315fb346cb04a43639e28354e6bf"} Nov 21 14:46:55 crc kubenswrapper[5133]: I1121 14:46:55.111555 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 21 14:46:55 crc kubenswrapper[5133]: I1121 14:46:55.179680 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 21 14:46:55 crc kubenswrapper[5133]: I1121 14:46:55.232948 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"404313b1-20dd-4623-ad80-ab9e2b521526","Type":"ContainerStarted","Data":"de0ec1489169d9c55b678a6b95679836d32d29cc6419acb9c238a31b27764365"} Nov 21 14:46:55 crc kubenswrapper[5133]: I1121 14:46:55.233083 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerName="manila-share" containerID="cri-o://9d1884d4fd74d01dad9ad4112c918cdbf473493041982b768cf400fcf954e9f8" gracePeriod=30 Nov 21 14:46:55 crc kubenswrapper[5133]: I1121 14:46:55.233133 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/manila-share-share1-0" podUID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerName="probe" containerID="cri-o://89dc4267dc5a9be3c23a797689f0c842e0f70bca5e16c8f6fc959353e37f64da" gracePeriod=30 Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.249590 5133 generic.go:334] "Generic (PLEG): container finished" podID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerID="89dc4267dc5a9be3c23a797689f0c842e0f70bca5e16c8f6fc959353e37f64da" exitCode=0 Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.250070 5133 generic.go:334] "Generic (PLEG): container finished" podID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerID="9d1884d4fd74d01dad9ad4112c918cdbf473493041982b768cf400fcf954e9f8" exitCode=1 Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.249647 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81","Type":"ContainerDied","Data":"89dc4267dc5a9be3c23a797689f0c842e0f70bca5e16c8f6fc959353e37f64da"} Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.250188 
5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81","Type":"ContainerDied","Data":"9d1884d4fd74d01dad9ad4112c918cdbf473493041982b768cf400fcf954e9f8"} Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.254031 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"404313b1-20dd-4623-ad80-ab9e2b521526","Type":"ContainerStarted","Data":"f8672bb8451e93b8a21bbecb8ebdc19572aede0d412ec6603f5c63b95d37462c"} Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.421981 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.550242 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-combined-ca-bundle\") pod \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.552128 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2x2w\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-kube-api-access-z2x2w\") pod \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.552274 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-ceph\") pod \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.552367 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-var-lib-manila\") pod \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.552597 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-etc-machine-id\") pod \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.552848 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data-custom\") pod \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.552973 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data\") pod \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.553132 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-scripts\") pod \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\" (UID: \"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81\") " Nov 21 14:46:56 crc 
kubenswrapper[5133]: I1121 14:46:56.554201 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" (UID: "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.554508 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-var-lib-manila" (OuterVolumeSpecName: "var-lib-manila") pod "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" (UID: "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81"). InnerVolumeSpecName "var-lib-manila". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.658549 5133 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.658602 5133 reconciler_common.go:293] "Volume detached for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-var-lib-manila\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.944745 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-scripts" (OuterVolumeSpecName: "scripts") pod "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" (UID: "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.945200 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-ceph" (OuterVolumeSpecName: "ceph") pod "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" (UID: "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.946085 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-kube-api-access-z2x2w" (OuterVolumeSpecName: "kube-api-access-z2x2w") pod "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" (UID: "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81"). InnerVolumeSpecName "kube-api-access-z2x2w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.946158 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" (UID: "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.966627 5133 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.966674 5133 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-scripts\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.966685 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2x2w\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-kube-api-access-z2x2w\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.966696 5133 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-ceph\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:56 crc kubenswrapper[5133]: I1121 14:46:56.971911 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" (UID: "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.065821 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data" (OuterVolumeSpecName: "config-data") pod "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" (UID: "d68c59a3-abc6-4cdd-bdfa-f3eac726fa81"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.068228 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.068253 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.273108 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"d68c59a3-abc6-4cdd-bdfa-f3eac726fa81","Type":"ContainerDied","Data":"8a65168923f92843875ad4b5571ba383588666664c3deaeacb81ed586418d6f1"} Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.273523 5133 scope.go:117] "RemoveContainer" containerID="89dc4267dc5a9be3c23a797689f0c842e0f70bca5e16c8f6fc959353e37f64da" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.273760 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.305280 5133 scope.go:117] "RemoveContainer" containerID="9d1884d4fd74d01dad9ad4112c918cdbf473493041982b768cf400fcf954e9f8" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.313119 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-share-share1-0"] Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.320618 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-share-share1-0"] Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.341859 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"] Nov 21 14:46:57 crc kubenswrapper[5133]: E1121 14:46:57.342246 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerName="manila-share" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.342264 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerName="manila-share" Nov 21 14:46:57 crc kubenswrapper[5133]: E1121 14:46:57.342297 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerName="probe" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.342304 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerName="probe" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.342491 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerName="manila-share" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.342518 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" containerName="probe" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.352902 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.357286 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.378372 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.479158 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k54d6\" (UniqueName: \"kubernetes.io/projected/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-kube-api-access-k54d6\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.479282 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.479321 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.479439 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-scripts\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.479491 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-config-data\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.479541 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.479588 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-ceph\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.479627 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc 
kubenswrapper[5133]: I1121 14:46:57.581329 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.581380 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.581522 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.581659 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-scripts\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.581767 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-config-data\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.581846 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.581905 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-ceph\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.581944 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.582041 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.582702 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k54d6\" (UniqueName: 
\"kubernetes.io/projected/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-kube-api-access-k54d6\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.588551 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.596288 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-ceph\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.597782 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-config-data\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.599077 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-scripts\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.599868 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.602742 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k54d6\" (UniqueName: \"kubernetes.io/projected/319ced81-67f1-4bb8-b896-c6f74ce4cfc0-kube-api-access-k54d6\") pod \"manila-share-share1-0\" (UID: \"319ced81-67f1-4bb8-b896-c6f74ce4cfc0\") " pod="openstack/manila-share-share1-0" Nov 21 14:46:57 crc kubenswrapper[5133]: I1121 14:46:57.673513 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-share-share1-0" Nov 21 14:46:58 crc kubenswrapper[5133]: I1121 14:46:58.286049 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"404313b1-20dd-4623-ad80-ab9e2b521526","Type":"ContainerStarted","Data":"88c1b5b30751e3563c0192fd5ddf5e6ba5612490767b3544a0a37e8113c79b33"} Nov 21 14:46:58 crc kubenswrapper[5133]: I1121 14:46:58.286462 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 21 14:46:58 crc kubenswrapper[5133]: I1121 14:46:58.326607 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.6106778419999999 podStartE2EDuration="6.32659122s" podCreationTimestamp="2025-11-21 14:46:52 +0000 UTC" firstStartedPulling="2025-11-21 14:46:53.109805388 +0000 UTC m=+3872.907637636" lastFinishedPulling="2025-11-21 14:46:57.825718766 +0000 UTC m=+3877.623551014" observedRunningTime="2025-11-21 14:46:58.318437254 +0000 UTC m=+3878.116269512" watchObservedRunningTime="2025-11-21 14:46:58.32659122 +0000 UTC m=+3878.124423468" Nov 21 14:46:58 crc kubenswrapper[5133]: W1121 14:46:58.401834 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod319ced81_67f1_4bb8_b896_c6f74ce4cfc0.slice/crio-43b80dbf7ff6f0abdaff05a7f16c19a6c2ee09e63f8d6a420f515c14d30b086c WatchSource:0}: Error finding container 43b80dbf7ff6f0abdaff05a7f16c19a6c2ee09e63f8d6a420f515c14d30b086c: Status 404 returned error can't find the container with id 43b80dbf7ff6f0abdaff05a7f16c19a6c2ee09e63f8d6a420f515c14d30b086c Nov 21 14:46:58 crc kubenswrapper[5133]: I1121 14:46:58.403374 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"] Nov 21 14:46:58 crc kubenswrapper[5133]: I1121 14:46:58.469192 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d68c59a3-abc6-4cdd-bdfa-f3eac726fa81" path="/var/lib/kubelet/pods/d68c59a3-abc6-4cdd-bdfa-f3eac726fa81/volumes" Nov 21 14:46:59 crc kubenswrapper[5133]: I1121 14:46:59.303546 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"319ced81-67f1-4bb8-b896-c6f74ce4cfc0","Type":"ContainerStarted","Data":"644b15465b91f7cdbf9ec21d39e48cfb4cd9079dd3656b1111a972c24bb5fbaf"} Nov 21 14:46:59 crc kubenswrapper[5133]: I1121 14:46:59.304326 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"319ced81-67f1-4bb8-b896-c6f74ce4cfc0","Type":"ContainerStarted","Data":"43b80dbf7ff6f0abdaff05a7f16c19a6c2ee09e63f8d6a420f515c14d30b086c"} Nov 21 14:46:59 crc kubenswrapper[5133]: I1121 14:46:59.875120 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0" Nov 21 14:47:00 crc kubenswrapper[5133]: I1121 14:47:00.330109 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"319ced81-67f1-4bb8-b896-c6f74ce4cfc0","Type":"ContainerStarted","Data":"e238ac7a87887f7ce8b06720af3062c1bd25f67a180cbeb1d8bbfc510a53db69"} Nov 21 14:47:00 crc kubenswrapper[5133]: I1121 14:47:00.352848 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.352831658 podStartE2EDuration="3.352831658s" podCreationTimestamp="2025-11-21 14:46:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 14:47:00.349761787 +0000 UTC m=+3880.147594035" watchObservedRunningTime="2025-11-21 14:47:00.352831658 +0000 UTC m=+3880.150663896" Nov 21 14:47:01 crc kubenswrapper[5133]: I1121 14:47:01.458449 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:47:01 crc kubenswrapper[5133]: E1121 14:47:01.458809 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:47:07 crc kubenswrapper[5133]: I1121 14:47:07.674239 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0" Nov 21 14:47:10 crc kubenswrapper[5133]: I1121 14:47:10.824976 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-79wcz"] Nov 21 14:47:10 crc kubenswrapper[5133]: I1121 14:47:10.840795 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-79wcz"] Nov 21 14:47:10 crc kubenswrapper[5133]: I1121 14:47:10.840918 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:10 crc kubenswrapper[5133]: I1121 14:47:10.966488 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m8xd\" (UniqueName: \"kubernetes.io/projected/dd8b637b-923e-4e01-86f2-5f28f67490fc-kube-api-access-7m8xd\") pod \"community-operators-79wcz\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:10 crc kubenswrapper[5133]: I1121 14:47:10.966662 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-utilities\") pod \"community-operators-79wcz\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:10 crc kubenswrapper[5133]: I1121 14:47:10.966697 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-catalog-content\") pod \"community-operators-79wcz\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:11 crc kubenswrapper[5133]: I1121 14:47:11.068837 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m8xd\" (UniqueName: \"kubernetes.io/projected/dd8b637b-923e-4e01-86f2-5f28f67490fc-kube-api-access-7m8xd\") pod \"community-operators-79wcz\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:11 crc kubenswrapper[5133]: I1121 14:47:11.068950 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-utilities\") pod \"community-operators-79wcz\" (UID: 
\"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:11 crc kubenswrapper[5133]: I1121 14:47:11.068968 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-catalog-content\") pod \"community-operators-79wcz\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:11 crc kubenswrapper[5133]: I1121 14:47:11.069412 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-catalog-content\") pod \"community-operators-79wcz\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:11 crc kubenswrapper[5133]: I1121 14:47:11.069537 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-utilities\") pod \"community-operators-79wcz\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:11 crc kubenswrapper[5133]: I1121 14:47:11.088217 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m8xd\" (UniqueName: \"kubernetes.io/projected/dd8b637b-923e-4e01-86f2-5f28f67490fc-kube-api-access-7m8xd\") pod \"community-operators-79wcz\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:11 crc kubenswrapper[5133]: I1121 14:47:11.178407 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:11 crc kubenswrapper[5133]: I1121 14:47:11.601441 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 21 14:47:11 crc kubenswrapper[5133]: I1121 14:47:11.771816 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-79wcz"] Nov 21 14:47:11 crc kubenswrapper[5133]: W1121 14:47:11.773588 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd8b637b_923e_4e01_86f2_5f28f67490fc.slice/crio-0768c88352dbee739966b01266f1b151eedca0b21442fc85fdde0351eab2286f WatchSource:0}: Error finding container 0768c88352dbee739966b01266f1b151eedca0b21442fc85fdde0351eab2286f: Status 404 returned error can't find the container with id 0768c88352dbee739966b01266f1b151eedca0b21442fc85fdde0351eab2286f Nov 21 14:47:12 crc kubenswrapper[5133]: I1121 14:47:12.675195 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79wcz" event={"ID":"dd8b637b-923e-4e01-86f2-5f28f67490fc","Type":"ContainerStarted","Data":"0768c88352dbee739966b01266f1b151eedca0b21442fc85fdde0351eab2286f"} Nov 21 14:47:13 crc kubenswrapper[5133]: I1121 14:47:13.472463 5133 generic.go:334] "Generic (PLEG): container finished" podID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerID="2cd89eef13456236068f2eac65d4171883e37c51f9a7b3ba92f1d2dc127e0bc1" exitCode=0 Nov 21 14:47:13 crc kubenswrapper[5133]: I1121 14:47:13.472578 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79wcz" 
event={"ID":"dd8b637b-923e-4e01-86f2-5f28f67490fc","Type":"ContainerDied","Data":"2cd89eef13456236068f2eac65d4171883e37c51f9a7b3ba92f1d2dc127e0bc1"} Nov 21 14:47:16 crc kubenswrapper[5133]: I1121 14:47:16.458792 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:47:16 crc kubenswrapper[5133]: E1121 14:47:16.459799 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:47:19 crc kubenswrapper[5133]: I1121 14:47:19.222226 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 21 14:47:19 crc kubenswrapper[5133]: I1121 14:47:19.539113 5133 generic.go:334] "Generic (PLEG): container finished" podID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerID="f1d63347c88d87b442275df4cc11e3db52a27f0edc99af395d050e8e3be744fe" exitCode=0 Nov 21 14:47:19 crc kubenswrapper[5133]: I1121 14:47:19.539155 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79wcz" event={"ID":"dd8b637b-923e-4e01-86f2-5f28f67490fc","Type":"ContainerDied","Data":"f1d63347c88d87b442275df4cc11e3db52a27f0edc99af395d050e8e3be744fe"} Nov 21 14:47:22 crc kubenswrapper[5133]: I1121 14:47:22.622943 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 21 14:47:24 crc kubenswrapper[5133]: I1121 14:47:24.592899 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79wcz" event={"ID":"dd8b637b-923e-4e01-86f2-5f28f67490fc","Type":"ContainerStarted","Data":"d0f63a3942b0f06d1b24af2e9d67b40968658464abdd34addf9ac0e787ba7e98"} Nov 21 14:47:24 crc kubenswrapper[5133]: I1121 14:47:24.611762 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-79wcz" podStartSLOduration=5.001698436 podStartE2EDuration="14.611740887s" podCreationTimestamp="2025-11-21 14:47:10 +0000 UTC" firstStartedPulling="2025-11-21 14:47:14.485297591 +0000 UTC m=+3894.283129839" lastFinishedPulling="2025-11-21 14:47:24.095340042 +0000 UTC m=+3903.893172290" observedRunningTime="2025-11-21 14:47:24.610215977 +0000 UTC m=+3904.408048255" watchObservedRunningTime="2025-11-21 14:47:24.611740887 +0000 UTC m=+3904.409573135" Nov 21 14:47:31 crc kubenswrapper[5133]: I1121 14:47:31.178616 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:31 crc kubenswrapper[5133]: I1121 14:47:31.179091 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:31 crc kubenswrapper[5133]: I1121 14:47:31.235890 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:31 crc kubenswrapper[5133]: I1121 14:47:31.457611 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:47:31 crc kubenswrapper[5133]: E1121 14:47:31.457876 5133 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:47:31 crc kubenswrapper[5133]: I1121 14:47:31.719095 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:31 crc kubenswrapper[5133]: I1121 14:47:31.771304 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-79wcz"] Nov 21 14:47:33 crc kubenswrapper[5133]: I1121 14:47:33.688718 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-79wcz" podUID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerName="registry-server" containerID="cri-o://d0f63a3942b0f06d1b24af2e9d67b40968658464abdd34addf9ac0e787ba7e98" gracePeriod=2 Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.699950 5133 generic.go:334] "Generic (PLEG): container finished" podID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerID="d0f63a3942b0f06d1b24af2e9d67b40968658464abdd34addf9ac0e787ba7e98" exitCode=0 Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.700036 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79wcz" event={"ID":"dd8b637b-923e-4e01-86f2-5f28f67490fc","Type":"ContainerDied","Data":"d0f63a3942b0f06d1b24af2e9d67b40968658464abdd34addf9ac0e787ba7e98"} Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.814319 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.866845 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-catalog-content\") pod \"dd8b637b-923e-4e01-86f2-5f28f67490fc\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.866968 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7m8xd\" (UniqueName: \"kubernetes.io/projected/dd8b637b-923e-4e01-86f2-5f28f67490fc-kube-api-access-7m8xd\") pod \"dd8b637b-923e-4e01-86f2-5f28f67490fc\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.867213 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-utilities\") pod \"dd8b637b-923e-4e01-86f2-5f28f67490fc\" (UID: \"dd8b637b-923e-4e01-86f2-5f28f67490fc\") " Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.867936 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-utilities" (OuterVolumeSpecName: "utilities") pod "dd8b637b-923e-4e01-86f2-5f28f67490fc" (UID: "dd8b637b-923e-4e01-86f2-5f28f67490fc"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.887780 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd8b637b-923e-4e01-86f2-5f28f67490fc-kube-api-access-7m8xd" (OuterVolumeSpecName: "kube-api-access-7m8xd") pod "dd8b637b-923e-4e01-86f2-5f28f67490fc" (UID: "dd8b637b-923e-4e01-86f2-5f28f67490fc"). InnerVolumeSpecName "kube-api-access-7m8xd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.912678 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dd8b637b-923e-4e01-86f2-5f28f67490fc" (UID: "dd8b637b-923e-4e01-86f2-5f28f67490fc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.969272 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.969307 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7m8xd\" (UniqueName: \"kubernetes.io/projected/dd8b637b-923e-4e01-86f2-5f28f67490fc-kube-api-access-7m8xd\") on node \"crc\" DevicePath \"\"" Nov 21 14:47:34 crc kubenswrapper[5133]: I1121 14:47:34.969320 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd8b637b-923e-4e01-86f2-5f28f67490fc-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:47:35 crc kubenswrapper[5133]: I1121 14:47:35.711980 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-79wcz" event={"ID":"dd8b637b-923e-4e01-86f2-5f28f67490fc","Type":"ContainerDied","Data":"0768c88352dbee739966b01266f1b151eedca0b21442fc85fdde0351eab2286f"} Nov 21 14:47:35 crc kubenswrapper[5133]: I1121 14:47:35.712052 5133 scope.go:117] "RemoveContainer" containerID="d0f63a3942b0f06d1b24af2e9d67b40968658464abdd34addf9ac0e787ba7e98" Nov 21 14:47:35 crc kubenswrapper[5133]: I1121 14:47:35.712188 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-79wcz" Nov 21 14:47:35 crc kubenswrapper[5133]: I1121 14:47:35.756318 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-79wcz"] Nov 21 14:47:35 crc kubenswrapper[5133]: I1121 14:47:35.764658 5133 scope.go:117] "RemoveContainer" containerID="f1d63347c88d87b442275df4cc11e3db52a27f0edc99af395d050e8e3be744fe" Nov 21 14:47:35 crc kubenswrapper[5133]: I1121 14:47:35.768065 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-79wcz"] Nov 21 14:47:35 crc kubenswrapper[5133]: I1121 14:47:35.793576 5133 scope.go:117] "RemoveContainer" containerID="2cd89eef13456236068f2eac65d4171883e37c51f9a7b3ba92f1d2dc127e0bc1" Nov 21 14:47:36 crc kubenswrapper[5133]: I1121 14:47:36.482151 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd8b637b-923e-4e01-86f2-5f28f67490fc" path="/var/lib/kubelet/pods/dd8b637b-923e-4e01-86f2-5f28f67490fc/volumes" Nov 21 14:47:45 crc kubenswrapper[5133]: I1121 14:47:45.458838 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:47:45 crc kubenswrapper[5133]: E1121 14:47:45.462631 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:47:58 crc kubenswrapper[5133]: I1121 14:47:58.460258 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:47:58 crc kubenswrapper[5133]: E1121 14:47:58.461236 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:48:12 crc kubenswrapper[5133]: I1121 14:48:12.463159 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:48:12 crc kubenswrapper[5133]: E1121 14:48:12.464064 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:48:24 crc kubenswrapper[5133]: I1121 14:48:24.461159 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:48:24 crc kubenswrapper[5133]: E1121 14:48:24.461947 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:48:39 crc kubenswrapper[5133]: I1121 14:48:39.458083 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:48:39 crc kubenswrapper[5133]: E1121 14:48:39.459135 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:48:41 crc kubenswrapper[5133]: I1121 14:48:41.135669 5133 scope.go:117] "RemoveContainer" containerID="0df8d5b36dfa4f4ed301dac37a3b3d478e0e9dcea5f50f0fd40a5555d8d10853" Nov 21 14:48:41 crc kubenswrapper[5133]: I1121 14:48:41.161264 5133 scope.go:117] "RemoveContainer" containerID="3675a30f9887180b74fccced679e6439731bb86aff775ddd678aa15c95d17095" Nov 21 14:48:41 crc kubenswrapper[5133]: I1121 14:48:41.203049 5133 scope.go:117] "RemoveContainer" containerID="72c408c36de2699e9a608ec06da01c0da7b5b2ce340091df489595fb98a726a7" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.267674 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 21 14:48:42 crc kubenswrapper[5133]: E1121 14:48:42.268547 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerName="extract-content" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.268584 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerName="extract-content" Nov 21 14:48:42 crc kubenswrapper[5133]: E1121 14:48:42.268621 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerName="extract-utilities" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.268639 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerName="extract-utilities" Nov 21 14:48:42 crc kubenswrapper[5133]: E1121 14:48:42.268663 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerName="registry-server" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.268680 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerName="registry-server" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.269157 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd8b637b-923e-4e01-86f2-5f28f67490fc" containerName="registry-server" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.270542 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.273691 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.273746 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.273899 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.273989 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-shdsl" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.284037 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.377595 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.377947 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.377986 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-config-data\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.378045 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.378166 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.378355 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.378623 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2wnd\" 
(UniqueName: \"kubernetes.io/projected/9c76c434-18e5-410a-9b4d-1538e6434c05-kube-api-access-t2wnd\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.378772 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.378870 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.480425 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.480488 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.480607 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2wnd\" (UniqueName: \"kubernetes.io/projected/9c76c434-18e5-410a-9b4d-1538e6434c05-kube-api-access-t2wnd\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.480669 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.480699 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.480835 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.480881 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: 
\"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.480922 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-config-data\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.480967 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.481440 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.481724 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.481854 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.483169 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.483322 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-config-data\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.487464 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.493862 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.499485 5133 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.502496 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2wnd\" (UniqueName: \"kubernetes.io/projected/9c76c434-18e5-410a-9b4d-1538e6434c05-kube-api-access-t2wnd\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.511766 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"tempest-tests-tempest\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " pod="openstack/tempest-tests-tempest" Nov 21 14:48:42 crc kubenswrapper[5133]: I1121 14:48:42.609282 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 14:48:43 crc kubenswrapper[5133]: I1121 14:48:43.180485 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 21 14:48:43 crc kubenswrapper[5133]: W1121 14:48:43.181800 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c76c434_18e5_410a_9b4d_1538e6434c05.slice/crio-82f07dd6b12b7103444777f009c65abad0c89da8f337ce5185618120de9983f7 WatchSource:0}: Error finding container 82f07dd6b12b7103444777f009c65abad0c89da8f337ce5185618120de9983f7: Status 404 returned error can't find the container with id 82f07dd6b12b7103444777f009c65abad0c89da8f337ce5185618120de9983f7 Nov 21 14:48:43 crc kubenswrapper[5133]: I1121 14:48:43.428951 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"9c76c434-18e5-410a-9b4d-1538e6434c05","Type":"ContainerStarted","Data":"82f07dd6b12b7103444777f009c65abad0c89da8f337ce5185618120de9983f7"} Nov 21 14:48:54 crc kubenswrapper[5133]: I1121 14:48:54.460792 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:48:54 crc kubenswrapper[5133]: E1121 14:48:54.461804 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:49:05 crc kubenswrapper[5133]: I1121 14:49:05.458305 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:49:05 crc kubenswrapper[5133]: E1121 14:49:05.459275 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" 
podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:49:18 crc kubenswrapper[5133]: E1121 14:49:18.451736 5133 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 21 14:49:18 crc kubenswrapper[5133]: E1121 14:49:18.452549 5133 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t2wnd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(9c76c434-18e5-410a-9b4d-1538e6434c05): ErrImagePull: rpc error: 
code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 21 14:49:18 crc kubenswrapper[5133]: E1121 14:49:18.454233 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="9c76c434-18e5-410a-9b4d-1538e6434c05" Nov 21 14:49:18 crc kubenswrapper[5133]: E1121 14:49:18.790989 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="9c76c434-18e5-410a-9b4d-1538e6434c05" Nov 21 14:49:20 crc kubenswrapper[5133]: I1121 14:49:20.457650 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:49:20 crc kubenswrapper[5133]: E1121 14:49:20.458205 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:49:32 crc kubenswrapper[5133]: I1121 14:49:32.465357 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:49:32 crc kubenswrapper[5133]: E1121 14:49:32.466173 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:49:33 crc kubenswrapper[5133]: I1121 14:49:33.461198 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:49:47 crc kubenswrapper[5133]: I1121 14:49:47.458233 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:49:47 crc kubenswrapper[5133]: E1121 14:49:47.459486 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:49:48 crc kubenswrapper[5133]: I1121 14:49:48.940369 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 21 14:49:51 crc kubenswrapper[5133]: I1121 14:49:51.142621 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"9c76c434-18e5-410a-9b4d-1538e6434c05","Type":"ContainerStarted","Data":"904b1ea40cfda33e5bcd014460ab6e62cdc159e8a25a9ae37c8ec525d109c0ba"} Nov 21 14:49:51 crc 
kubenswrapper[5133]: I1121 14:49:51.161961 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.410024733 podStartE2EDuration="1m10.161939402s" podCreationTimestamp="2025-11-21 14:48:41 +0000 UTC" firstStartedPulling="2025-11-21 14:48:43.184454511 +0000 UTC m=+3982.982286789" lastFinishedPulling="2025-11-21 14:49:48.93636921 +0000 UTC m=+4048.734201458" observedRunningTime="2025-11-21 14:49:51.158453939 +0000 UTC m=+4050.956286207" watchObservedRunningTime="2025-11-21 14:49:51.161939402 +0000 UTC m=+4050.959771650" Nov 21 14:49:59 crc kubenswrapper[5133]: I1121 14:49:59.458397 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:50:00 crc kubenswrapper[5133]: I1121 14:50:00.249508 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"1c0379750d4e583fc846bed74993d9b55496d845b0e9cbb58901a88dafa6bc5c"} Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.126298 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qsshk"] Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.129278 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.177941 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmx8d\" (UniqueName: \"kubernetes.io/projected/043bd847-4e5f-4312-a8e5-bc8de1e50572-kube-api-access-hmx8d\") pod \"certified-operators-qsshk\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.178065 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-utilities\") pod \"certified-operators-qsshk\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.178141 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-catalog-content\") pod \"certified-operators-qsshk\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.178360 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qsshk"] Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.280633 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmx8d\" (UniqueName: \"kubernetes.io/projected/043bd847-4e5f-4312-a8e5-bc8de1e50572-kube-api-access-hmx8d\") pod \"certified-operators-qsshk\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.280735 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-utilities\") pod \"certified-operators-qsshk\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.280798 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-catalog-content\") pod \"certified-operators-qsshk\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.281470 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-catalog-content\") pod \"certified-operators-qsshk\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.282037 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-utilities\") pod \"certified-operators-qsshk\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.302301 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmx8d\" (UniqueName: \"kubernetes.io/projected/043bd847-4e5f-4312-a8e5-bc8de1e50572-kube-api-access-hmx8d\") pod \"certified-operators-qsshk\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:41 crc kubenswrapper[5133]: I1121 14:50:41.455809 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:42 crc kubenswrapper[5133]: I1121 14:50:42.146715 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qsshk"] Nov 21 14:50:42 crc kubenswrapper[5133]: I1121 14:50:42.702718 5133 generic.go:334] "Generic (PLEG): container finished" podID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerID="64daa508bb2fce62f59d9f889c38cda81ed955a1753f6f847827c78f1aa6cf40" exitCode=0 Nov 21 14:50:42 crc kubenswrapper[5133]: I1121 14:50:42.702815 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qsshk" event={"ID":"043bd847-4e5f-4312-a8e5-bc8de1e50572","Type":"ContainerDied","Data":"64daa508bb2fce62f59d9f889c38cda81ed955a1753f6f847827c78f1aa6cf40"} Nov 21 14:50:42 crc kubenswrapper[5133]: I1121 14:50:42.703124 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qsshk" event={"ID":"043bd847-4e5f-4312-a8e5-bc8de1e50572","Type":"ContainerStarted","Data":"ad31876fd0010b9c2abea1c8e1337a566bb8bc2df5214b96417841f6c3672b6e"} Nov 21 14:50:43 crc kubenswrapper[5133]: I1121 14:50:43.716497 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qsshk" event={"ID":"043bd847-4e5f-4312-a8e5-bc8de1e50572","Type":"ContainerStarted","Data":"f541edf0f325c294fc3a803fbf051b16330e714dbd726a9e7370ebbb5702c947"} Nov 21 14:50:44 crc kubenswrapper[5133]: I1121 14:50:44.726135 5133 generic.go:334] "Generic (PLEG): container finished" podID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerID="f541edf0f325c294fc3a803fbf051b16330e714dbd726a9e7370ebbb5702c947" exitCode=0 Nov 21 14:50:44 crc kubenswrapper[5133]: I1121 14:50:44.726207 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qsshk" event={"ID":"043bd847-4e5f-4312-a8e5-bc8de1e50572","Type":"ContainerDied","Data":"f541edf0f325c294fc3a803fbf051b16330e714dbd726a9e7370ebbb5702c947"} Nov 21 14:50:45 crc kubenswrapper[5133]: I1121 14:50:45.741267 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qsshk" event={"ID":"043bd847-4e5f-4312-a8e5-bc8de1e50572","Type":"ContainerStarted","Data":"e55dba71b1e80866817a1920c800b21366f097a009cac94495c1e362530514e8"} Nov 21 14:50:45 crc kubenswrapper[5133]: I1121 14:50:45.764563 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qsshk" podStartSLOduration=2.151794089 podStartE2EDuration="4.764540266s" podCreationTimestamp="2025-11-21 14:50:41 +0000 UTC" firstStartedPulling="2025-11-21 14:50:42.70484273 +0000 UTC m=+4102.502674978" lastFinishedPulling="2025-11-21 14:50:45.317588907 +0000 UTC m=+4105.115421155" observedRunningTime="2025-11-21 14:50:45.758840894 +0000 UTC m=+4105.556673182" watchObservedRunningTime="2025-11-21 14:50:45.764540266 +0000 UTC m=+4105.562372524" Nov 21 14:50:51 crc kubenswrapper[5133]: I1121 14:50:51.456362 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:51 crc kubenswrapper[5133]: I1121 14:50:51.456946 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:51 crc kubenswrapper[5133]: I1121 14:50:51.875190 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:51 crc kubenswrapper[5133]: I1121 14:50:51.977302 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:52 crc kubenswrapper[5133]: I1121 14:50:52.131230 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qsshk"] Nov 21 14:50:53 crc kubenswrapper[5133]: I1121 14:50:53.818988 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qsshk" podUID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerName="registry-server" containerID="cri-o://e55dba71b1e80866817a1920c800b21366f097a009cac94495c1e362530514e8" gracePeriod=2 Nov 21 14:50:54 crc kubenswrapper[5133]: I1121 14:50:54.829427 5133 generic.go:334] "Generic (PLEG): container finished" podID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerID="e55dba71b1e80866817a1920c800b21366f097a009cac94495c1e362530514e8" exitCode=0 Nov 21 14:50:54 crc kubenswrapper[5133]: I1121 14:50:54.829490 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qsshk" event={"ID":"043bd847-4e5f-4312-a8e5-bc8de1e50572","Type":"ContainerDied","Data":"e55dba71b1e80866817a1920c800b21366f097a009cac94495c1e362530514e8"} Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.553540 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.604191 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-catalog-content\") pod \"043bd847-4e5f-4312-a8e5-bc8de1e50572\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.604730 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmx8d\" (UniqueName: \"kubernetes.io/projected/043bd847-4e5f-4312-a8e5-bc8de1e50572-kube-api-access-hmx8d\") pod \"043bd847-4e5f-4312-a8e5-bc8de1e50572\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.604820 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-utilities\") pod \"043bd847-4e5f-4312-a8e5-bc8de1e50572\" (UID: \"043bd847-4e5f-4312-a8e5-bc8de1e50572\") " Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.607350 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-utilities" (OuterVolumeSpecName: "utilities") pod "043bd847-4e5f-4312-a8e5-bc8de1e50572" (UID: "043bd847-4e5f-4312-a8e5-bc8de1e50572"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.613234 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/043bd847-4e5f-4312-a8e5-bc8de1e50572-kube-api-access-hmx8d" (OuterVolumeSpecName: "kube-api-access-hmx8d") pod "043bd847-4e5f-4312-a8e5-bc8de1e50572" (UID: "043bd847-4e5f-4312-a8e5-bc8de1e50572"). InnerVolumeSpecName "kube-api-access-hmx8d". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.651351 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "043bd847-4e5f-4312-a8e5-bc8de1e50572" (UID: "043bd847-4e5f-4312-a8e5-bc8de1e50572"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.707888 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.707922 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmx8d\" (UniqueName: \"kubernetes.io/projected/043bd847-4e5f-4312-a8e5-bc8de1e50572-kube-api-access-hmx8d\") on node \"crc\" DevicePath \"\"" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.707932 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/043bd847-4e5f-4312-a8e5-bc8de1e50572-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.839703 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qsshk" event={"ID":"043bd847-4e5f-4312-a8e5-bc8de1e50572","Type":"ContainerDied","Data":"ad31876fd0010b9c2abea1c8e1337a566bb8bc2df5214b96417841f6c3672b6e"} Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.839763 5133 scope.go:117] "RemoveContainer" containerID="e55dba71b1e80866817a1920c800b21366f097a009cac94495c1e362530514e8" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.839909 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qsshk" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.871597 5133 scope.go:117] "RemoveContainer" containerID="f541edf0f325c294fc3a803fbf051b16330e714dbd726a9e7370ebbb5702c947" Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.879023 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qsshk"] Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.891191 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qsshk"] Nov 21 14:50:55 crc kubenswrapper[5133]: I1121 14:50:55.899724 5133 scope.go:117] "RemoveContainer" containerID="64daa508bb2fce62f59d9f889c38cda81ed955a1753f6f847827c78f1aa6cf40" Nov 21 14:50:56 crc kubenswrapper[5133]: I1121 14:50:56.480921 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="043bd847-4e5f-4312-a8e5-bc8de1e50572" path="/var/lib/kubelet/pods/043bd847-4e5f-4312-a8e5-bc8de1e50572/volumes" Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.807428 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4zdgg"] Nov 21 14:52:08 crc kubenswrapper[5133]: E1121 14:52:08.809494 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerName="extract-content" Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.809539 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerName="extract-content" Nov 21 14:52:08 crc kubenswrapper[5133]: E1121 14:52:08.809567 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerName="registry-server" Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.809575 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerName="registry-server" Nov 21 14:52:08 crc kubenswrapper[5133]: E1121 14:52:08.809606 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerName="extract-utilities" Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.809615 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerName="extract-utilities" Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.809840 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="043bd847-4e5f-4312-a8e5-bc8de1e50572" containerName="registry-server" Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.811491 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.827199 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zdgg"] Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.918394 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-catalog-content\") pod \"redhat-marketplace-4zdgg\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.918475 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-utilities\") pod \"redhat-marketplace-4zdgg\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:08 crc kubenswrapper[5133]: I1121 14:52:08.918510 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdjn2\" (UniqueName: \"kubernetes.io/projected/8719a67f-6961-4f66-be30-27871c188a8e-kube-api-access-hdjn2\") pod \"redhat-marketplace-4zdgg\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:09 crc kubenswrapper[5133]: I1121 14:52:09.019977 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-catalog-content\") pod \"redhat-marketplace-4zdgg\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:09 crc kubenswrapper[5133]: I1121 14:52:09.020195 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-utilities\") pod \"redhat-marketplace-4zdgg\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:09 crc kubenswrapper[5133]: I1121 14:52:09.020266 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdjn2\" (UniqueName: \"kubernetes.io/projected/8719a67f-6961-4f66-be30-27871c188a8e-kube-api-access-hdjn2\") pod \"redhat-marketplace-4zdgg\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:09 crc kubenswrapper[5133]: I1121 14:52:09.020583 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-catalog-content\") pod \"redhat-marketplace-4zdgg\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:09 crc kubenswrapper[5133]: I1121 14:52:09.020720 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-utilities\") pod \"redhat-marketplace-4zdgg\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:09 crc kubenswrapper[5133]: I1121 14:52:09.105143 5133 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hdjn2\" (UniqueName: \"kubernetes.io/projected/8719a67f-6961-4f66-be30-27871c188a8e-kube-api-access-hdjn2\") pod \"redhat-marketplace-4zdgg\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:09 crc kubenswrapper[5133]: I1121 14:52:09.151536 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:09 crc kubenswrapper[5133]: I1121 14:52:09.761876 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zdgg"] Nov 21 14:52:09 crc kubenswrapper[5133]: I1121 14:52:09.897564 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zdgg" event={"ID":"8719a67f-6961-4f66-be30-27871c188a8e","Type":"ContainerStarted","Data":"587b6dc9678508fa2f49a023db33b12d5305fdfe8302509139d04a48350dc2a1"} Nov 21 14:52:10 crc kubenswrapper[5133]: I1121 14:52:10.910195 5133 generic.go:334] "Generic (PLEG): container finished" podID="8719a67f-6961-4f66-be30-27871c188a8e" containerID="fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16" exitCode=0 Nov 21 14:52:10 crc kubenswrapper[5133]: I1121 14:52:10.910473 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zdgg" event={"ID":"8719a67f-6961-4f66-be30-27871c188a8e","Type":"ContainerDied","Data":"fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16"} Nov 21 14:52:12 crc kubenswrapper[5133]: I1121 14:52:12.943321 5133 generic.go:334] "Generic (PLEG): container finished" podID="8719a67f-6961-4f66-be30-27871c188a8e" containerID="53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0" exitCode=0 Nov 21 14:52:12 crc kubenswrapper[5133]: I1121 14:52:12.943457 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zdgg" event={"ID":"8719a67f-6961-4f66-be30-27871c188a8e","Type":"ContainerDied","Data":"53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0"} Nov 21 14:52:15 crc kubenswrapper[5133]: I1121 14:52:15.975317 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zdgg" event={"ID":"8719a67f-6961-4f66-be30-27871c188a8e","Type":"ContainerStarted","Data":"3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc"} Nov 21 14:52:19 crc kubenswrapper[5133]: I1121 14:52:19.151678 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:19 crc kubenswrapper[5133]: I1121 14:52:19.152300 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:19 crc kubenswrapper[5133]: I1121 14:52:19.219704 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:19 crc kubenswrapper[5133]: I1121 14:52:19.262813 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4zdgg" podStartSLOduration=7.435347158 podStartE2EDuration="11.2627924s" podCreationTimestamp="2025-11-21 14:52:08 +0000 UTC" firstStartedPulling="2025-11-21 14:52:10.912535234 +0000 UTC m=+4190.710367482" lastFinishedPulling="2025-11-21 14:52:14.739980466 +0000 UTC m=+4194.537812724" observedRunningTime="2025-11-21 14:52:16.005441938 +0000 UTC 
m=+4195.803274196" watchObservedRunningTime="2025-11-21 14:52:19.2627924 +0000 UTC m=+4199.060624648" Nov 21 14:52:20 crc kubenswrapper[5133]: I1121 14:52:20.122871 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:20 crc kubenswrapper[5133]: I1121 14:52:20.167840 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zdgg"] Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.068585 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4zdgg" podUID="8719a67f-6961-4f66-be30-27871c188a8e" containerName="registry-server" containerID="cri-o://3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc" gracePeriod=2 Nov 21 14:52:22 crc kubenswrapper[5133]: E1121 14:52:22.383853 5133 info.go:109] Failed to get network devices: open /sys/class/net/587b6dc9678508f/address: no such file or directory Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.589361 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.700918 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdjn2\" (UniqueName: \"kubernetes.io/projected/8719a67f-6961-4f66-be30-27871c188a8e-kube-api-access-hdjn2\") pod \"8719a67f-6961-4f66-be30-27871c188a8e\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.701344 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-catalog-content\") pod \"8719a67f-6961-4f66-be30-27871c188a8e\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.701383 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-utilities\") pod \"8719a67f-6961-4f66-be30-27871c188a8e\" (UID: \"8719a67f-6961-4f66-be30-27871c188a8e\") " Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.702353 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-utilities" (OuterVolumeSpecName: "utilities") pod "8719a67f-6961-4f66-be30-27871c188a8e" (UID: "8719a67f-6961-4f66-be30-27871c188a8e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.706603 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8719a67f-6961-4f66-be30-27871c188a8e-kube-api-access-hdjn2" (OuterVolumeSpecName: "kube-api-access-hdjn2") pod "8719a67f-6961-4f66-be30-27871c188a8e" (UID: "8719a67f-6961-4f66-be30-27871c188a8e"). InnerVolumeSpecName "kube-api-access-hdjn2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.722485 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8719a67f-6961-4f66-be30-27871c188a8e" (UID: "8719a67f-6961-4f66-be30-27871c188a8e"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.803597 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdjn2\" (UniqueName: \"kubernetes.io/projected/8719a67f-6961-4f66-be30-27871c188a8e-kube-api-access-hdjn2\") on node \"crc\" DevicePath \"\"" Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.803641 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:52:22 crc kubenswrapper[5133]: I1121 14:52:22.803652 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8719a67f-6961-4f66-be30-27871c188a8e-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.082862 5133 generic.go:334] "Generic (PLEG): container finished" podID="8719a67f-6961-4f66-be30-27871c188a8e" containerID="3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc" exitCode=0 Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.082921 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4zdgg" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.082935 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zdgg" event={"ID":"8719a67f-6961-4f66-be30-27871c188a8e","Type":"ContainerDied","Data":"3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc"} Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.082994 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4zdgg" event={"ID":"8719a67f-6961-4f66-be30-27871c188a8e","Type":"ContainerDied","Data":"587b6dc9678508fa2f49a023db33b12d5305fdfe8302509139d04a48350dc2a1"} Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.083073 5133 scope.go:117] "RemoveContainer" containerID="3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.126132 5133 scope.go:117] "RemoveContainer" containerID="53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.129231 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zdgg"] Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.140732 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4zdgg"] Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.153961 5133 scope.go:117] "RemoveContainer" containerID="fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.209975 5133 scope.go:117] "RemoveContainer" containerID="3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc" Nov 21 14:52:23 crc kubenswrapper[5133]: E1121 14:52:23.210398 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc\": container with ID starting with 3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc not found: ID does not exist" containerID="3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc" Nov 21 14:52:23 crc 
kubenswrapper[5133]: I1121 14:52:23.210434 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc"} err="failed to get container status \"3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc\": rpc error: code = NotFound desc = could not find container \"3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc\": container with ID starting with 3c4ed47783eb39ad83f1dcf1d2b03bcf8e970788c68d574f0e31480b10b8bedc not found: ID does not exist" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.210499 5133 scope.go:117] "RemoveContainer" containerID="53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0" Nov 21 14:52:23 crc kubenswrapper[5133]: E1121 14:52:23.210737 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0\": container with ID starting with 53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0 not found: ID does not exist" containerID="53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.210763 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0"} err="failed to get container status \"53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0\": rpc error: code = NotFound desc = could not find container \"53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0\": container with ID starting with 53b95cbf42614ca6ede8e3dcb6273db21b25ab61e6f8e15d5f325f06ef55beb0 not found: ID does not exist" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.210777 5133 scope.go:117] "RemoveContainer" containerID="fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16" Nov 21 14:52:23 crc kubenswrapper[5133]: E1121 14:52:23.211093 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16\": container with ID starting with fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16 not found: ID does not exist" containerID="fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.211163 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16"} err="failed to get container status \"fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16\": rpc error: code = NotFound desc = could not find container \"fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16\": container with ID starting with fa4ab52a05cee6d2062a085aca5a7fcb6e77462e012f270074ffdfea1d07bc16 not found: ID does not exist" Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.310450 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:52:23 crc kubenswrapper[5133]: I1121 14:52:23.310534 5133 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:52:24 crc kubenswrapper[5133]: I1121 14:52:24.468172 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8719a67f-6961-4f66-be30-27871c188a8e" path="/var/lib/kubelet/pods/8719a67f-6961-4f66-be30-27871c188a8e/volumes" Nov 21 14:52:53 crc kubenswrapper[5133]: I1121 14:52:53.310257 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:52:53 crc kubenswrapper[5133]: I1121 14:52:53.310780 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:53:23 crc kubenswrapper[5133]: I1121 14:53:23.310920 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:53:23 crc kubenswrapper[5133]: I1121 14:53:23.311572 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:53:23 crc kubenswrapper[5133]: I1121 14:53:23.311647 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:53:23 crc kubenswrapper[5133]: I1121 14:53:23.313274 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c0379750d4e583fc846bed74993d9b55496d845b0e9cbb58901a88dafa6bc5c"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:53:23 crc kubenswrapper[5133]: I1121 14:53:23.313379 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://1c0379750d4e583fc846bed74993d9b55496d845b0e9cbb58901a88dafa6bc5c" gracePeriod=600 Nov 21 14:53:23 crc kubenswrapper[5133]: I1121 14:53:23.750122 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="1c0379750d4e583fc846bed74993d9b55496d845b0e9cbb58901a88dafa6bc5c" exitCode=0 Nov 21 14:53:23 crc kubenswrapper[5133]: I1121 14:53:23.750206 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" 
event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"1c0379750d4e583fc846bed74993d9b55496d845b0e9cbb58901a88dafa6bc5c"} Nov 21 14:53:23 crc kubenswrapper[5133]: I1121 14:53:23.750539 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e"} Nov 21 14:53:23 crc kubenswrapper[5133]: I1121 14:53:23.750575 5133 scope.go:117] "RemoveContainer" containerID="8580d7a7211cee4078050130d68876fc21d141fe3a8eaa4aa514ada3bc5ab459" Nov 21 14:54:14 crc kubenswrapper[5133]: I1121 14:54:14.092417 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-2zdfp"] Nov 21 14:54:14 crc kubenswrapper[5133]: I1121 14:54:14.106717 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-88e9-account-create-kv6xr"] Nov 21 14:54:14 crc kubenswrapper[5133]: I1121 14:54:14.118448 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-88e9-account-create-kv6xr"] Nov 21 14:54:14 crc kubenswrapper[5133]: I1121 14:54:14.129197 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-2zdfp"] Nov 21 14:54:14 crc kubenswrapper[5133]: I1121 14:54:14.477671 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c2588b0-ad67-4806-a52c-7b0e7a7302c8" path="/var/lib/kubelet/pods/0c2588b0-ad67-4806-a52c-7b0e7a7302c8/volumes" Nov 21 14:54:14 crc kubenswrapper[5133]: I1121 14:54:14.479072 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6dd226ff-efe4-4adb-b60f-68641e1226a8" path="/var/lib/kubelet/pods/6dd226ff-efe4-4adb-b60f-68641e1226a8/volumes" Nov 21 14:54:42 crc kubenswrapper[5133]: I1121 14:54:42.247387 5133 scope.go:117] "RemoveContainer" containerID="221e263e71f739040ba9afb120f8129ce2ce8c1a5dd4841a7c6a8d0844e254e1" Nov 21 14:54:42 crc kubenswrapper[5133]: I1121 14:54:42.282384 5133 scope.go:117] "RemoveContainer" containerID="c73fe7d91dbdae642d848f38668aae283ac9eeae0d618250d60f7b3f4508daa3" Nov 21 14:55:23 crc kubenswrapper[5133]: I1121 14:55:23.310937 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:55:23 crc kubenswrapper[5133]: I1121 14:55:23.311522 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:55:53 crc kubenswrapper[5133]: I1121 14:55:53.311522 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:55:53 crc kubenswrapper[5133]: I1121 14:55:53.312109 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.642651 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lvsft"] Nov 21 14:56:08 crc kubenswrapper[5133]: E1121 14:56:08.643780 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8719a67f-6961-4f66-be30-27871c188a8e" containerName="extract-utilities" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.643798 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8719a67f-6961-4f66-be30-27871c188a8e" containerName="extract-utilities" Nov 21 14:56:08 crc kubenswrapper[5133]: E1121 14:56:08.643857 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8719a67f-6961-4f66-be30-27871c188a8e" containerName="extract-content" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.643867 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8719a67f-6961-4f66-be30-27871c188a8e" containerName="extract-content" Nov 21 14:56:08 crc kubenswrapper[5133]: E1121 14:56:08.643894 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8719a67f-6961-4f66-be30-27871c188a8e" containerName="registry-server" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.643903 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8719a67f-6961-4f66-be30-27871c188a8e" containerName="registry-server" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.644137 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="8719a67f-6961-4f66-be30-27871c188a8e" containerName="registry-server" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.646597 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.653397 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lvsft"] Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.755637 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg6df\" (UniqueName: \"kubernetes.io/projected/8b8f670c-0078-41b5-b04f-3e31a478f4f7-kube-api-access-pg6df\") pod \"redhat-operators-lvsft\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.755949 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-utilities\") pod \"redhat-operators-lvsft\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.756358 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-catalog-content\") pod \"redhat-operators-lvsft\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.858044 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-utilities\") pod \"redhat-operators-lvsft\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.858155 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-catalog-content\") pod \"redhat-operators-lvsft\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.858211 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg6df\" (UniqueName: \"kubernetes.io/projected/8b8f670c-0078-41b5-b04f-3e31a478f4f7-kube-api-access-pg6df\") pod \"redhat-operators-lvsft\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.858807 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-catalog-content\") pod \"redhat-operators-lvsft\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:08 crc kubenswrapper[5133]: I1121 14:56:08.858811 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-utilities\") pod \"redhat-operators-lvsft\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:09 crc kubenswrapper[5133]: I1121 14:56:09.209964 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pg6df\" (UniqueName: \"kubernetes.io/projected/8b8f670c-0078-41b5-b04f-3e31a478f4f7-kube-api-access-pg6df\") pod \"redhat-operators-lvsft\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:09 crc kubenswrapper[5133]: I1121 14:56:09.268912 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:09 crc kubenswrapper[5133]: I1121 14:56:09.813391 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lvsft"] Nov 21 14:56:10 crc kubenswrapper[5133]: I1121 14:56:10.512480 5133 generic.go:334] "Generic (PLEG): container finished" podID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerID="74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67" exitCode=0 Nov 21 14:56:10 crc kubenswrapper[5133]: I1121 14:56:10.512536 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvsft" event={"ID":"8b8f670c-0078-41b5-b04f-3e31a478f4f7","Type":"ContainerDied","Data":"74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67"} Nov 21 14:56:10 crc kubenswrapper[5133]: I1121 14:56:10.512749 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvsft" event={"ID":"8b8f670c-0078-41b5-b04f-3e31a478f4f7","Type":"ContainerStarted","Data":"78bffcd8a64a9a4c42752d49bc488241beed99a41529cb1051d189f4bc057a10"} Nov 21 14:56:10 crc kubenswrapper[5133]: I1121 14:56:10.514454 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 14:56:11 crc kubenswrapper[5133]: I1121 14:56:11.523970 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvsft" event={"ID":"8b8f670c-0078-41b5-b04f-3e31a478f4f7","Type":"ContainerStarted","Data":"ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a"} Nov 21 14:56:12 crc kubenswrapper[5133]: E1121 14:56:12.090344 5133 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b8f670c_0078_41b5_b04f_3e31a478f4f7.slice/crio-conmon-ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b8f670c_0078_41b5_b04f_3e31a478f4f7.slice/crio-ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a.scope\": RecentStats: unable to find data in memory cache]" Nov 21 14:56:12 crc kubenswrapper[5133]: I1121 14:56:12.546247 5133 generic.go:334] "Generic (PLEG): container finished" podID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerID="ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a" exitCode=0 Nov 21 14:56:12 crc kubenswrapper[5133]: I1121 14:56:12.546903 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvsft" event={"ID":"8b8f670c-0078-41b5-b04f-3e31a478f4f7","Type":"ContainerDied","Data":"ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a"} Nov 21 14:56:13 crc kubenswrapper[5133]: I1121 14:56:13.557788 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvsft" event={"ID":"8b8f670c-0078-41b5-b04f-3e31a478f4f7","Type":"ContainerStarted","Data":"0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac"} 
Nov 21 14:56:13 crc kubenswrapper[5133]: I1121 14:56:13.588854 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lvsft" podStartSLOduration=3.104293389 podStartE2EDuration="5.588831165s" podCreationTimestamp="2025-11-21 14:56:08 +0000 UTC" firstStartedPulling="2025-11-21 14:56:10.514220041 +0000 UTC m=+4430.312052289" lastFinishedPulling="2025-11-21 14:56:12.998757807 +0000 UTC m=+4432.796590065" observedRunningTime="2025-11-21 14:56:13.580311457 +0000 UTC m=+4433.378143715" watchObservedRunningTime="2025-11-21 14:56:13.588831165 +0000 UTC m=+4433.386663423" Nov 21 14:56:19 crc kubenswrapper[5133]: I1121 14:56:19.269926 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:19 crc kubenswrapper[5133]: I1121 14:56:19.270567 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:20 crc kubenswrapper[5133]: I1121 14:56:20.320375 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lvsft" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerName="registry-server" probeResult="failure" output=< Nov 21 14:56:20 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 14:56:20 crc kubenswrapper[5133]: > Nov 21 14:56:22 crc kubenswrapper[5133]: I1121 14:56:22.047223 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-k4xmx"] Nov 21 14:56:22 crc kubenswrapper[5133]: I1121 14:56:22.059529 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-k4xmx"] Nov 21 14:56:22 crc kubenswrapper[5133]: I1121 14:56:22.478716 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4126fdfa-1d4a-4276-951e-c1a26a3492dd" path="/var/lib/kubelet/pods/4126fdfa-1d4a-4276-951e-c1a26a3492dd/volumes" Nov 21 14:56:23 crc kubenswrapper[5133]: I1121 14:56:23.310724 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 14:56:23 crc kubenswrapper[5133]: I1121 14:56:23.310810 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 14:56:23 crc kubenswrapper[5133]: I1121 14:56:23.310879 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 14:56:23 crc kubenswrapper[5133]: I1121 14:56:23.311720 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 14:56:23 crc kubenswrapper[5133]: I1121 14:56:23.311823 5133 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" gracePeriod=600 Nov 21 14:56:23 crc kubenswrapper[5133]: E1121 14:56:23.445136 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:56:23 crc kubenswrapper[5133]: I1121 14:56:23.677632 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" exitCode=0 Nov 21 14:56:23 crc kubenswrapper[5133]: I1121 14:56:23.677684 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e"} Nov 21 14:56:23 crc kubenswrapper[5133]: I1121 14:56:23.677730 5133 scope.go:117] "RemoveContainer" containerID="1c0379750d4e583fc846bed74993d9b55496d845b0e9cbb58901a88dafa6bc5c" Nov 21 14:56:23 crc kubenswrapper[5133]: I1121 14:56:23.678694 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:56:23 crc kubenswrapper[5133]: E1121 14:56:23.679229 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:56:29 crc kubenswrapper[5133]: I1121 14:56:29.341576 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:29 crc kubenswrapper[5133]: I1121 14:56:29.404365 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:29 crc kubenswrapper[5133]: I1121 14:56:29.595294 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lvsft"] Nov 21 14:56:30 crc kubenswrapper[5133]: I1121 14:56:30.746732 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lvsft" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerName="registry-server" containerID="cri-o://0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac" gracePeriod=2 Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.370981 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.443848 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pg6df\" (UniqueName: \"kubernetes.io/projected/8b8f670c-0078-41b5-b04f-3e31a478f4f7-kube-api-access-pg6df\") pod \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.444239 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-utilities\") pod \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.444357 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-catalog-content\") pod \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\" (UID: \"8b8f670c-0078-41b5-b04f-3e31a478f4f7\") " Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.444968 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-utilities" (OuterVolumeSpecName: "utilities") pod "8b8f670c-0078-41b5-b04f-3e31a478f4f7" (UID: "8b8f670c-0078-41b5-b04f-3e31a478f4f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.451469 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b8f670c-0078-41b5-b04f-3e31a478f4f7-kube-api-access-pg6df" (OuterVolumeSpecName: "kube-api-access-pg6df") pod "8b8f670c-0078-41b5-b04f-3e31a478f4f7" (UID: "8b8f670c-0078-41b5-b04f-3e31a478f4f7"). InnerVolumeSpecName "kube-api-access-pg6df". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.546572 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pg6df\" (UniqueName: \"kubernetes.io/projected/8b8f670c-0078-41b5-b04f-3e31a478f4f7-kube-api-access-pg6df\") on node \"crc\" DevicePath \"\"" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.546611 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.555630 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8b8f670c-0078-41b5-b04f-3e31a478f4f7" (UID: "8b8f670c-0078-41b5-b04f-3e31a478f4f7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.648805 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b8f670c-0078-41b5-b04f-3e31a478f4f7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.759440 5133 generic.go:334] "Generic (PLEG): container finished" podID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerID="0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac" exitCode=0 Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.759487 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvsft" event={"ID":"8b8f670c-0078-41b5-b04f-3e31a478f4f7","Type":"ContainerDied","Data":"0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac"} Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.759527 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lvsft" event={"ID":"8b8f670c-0078-41b5-b04f-3e31a478f4f7","Type":"ContainerDied","Data":"78bffcd8a64a9a4c42752d49bc488241beed99a41529cb1051d189f4bc057a10"} Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.759540 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lvsft" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.759550 5133 scope.go:117] "RemoveContainer" containerID="0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.778965 5133 scope.go:117] "RemoveContainer" containerID="ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a" Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.793524 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lvsft"] Nov 21 14:56:31 crc kubenswrapper[5133]: I1121 14:56:31.801268 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lvsft"] Nov 21 14:56:32 crc kubenswrapper[5133]: I1121 14:56:32.217653 5133 scope.go:117] "RemoveContainer" containerID="74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67" Nov 21 14:56:32 crc kubenswrapper[5133]: I1121 14:56:32.270139 5133 scope.go:117] "RemoveContainer" containerID="0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac" Nov 21 14:56:32 crc kubenswrapper[5133]: E1121 14:56:32.270834 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac\": container with ID starting with 0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac not found: ID does not exist" containerID="0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac" Nov 21 14:56:32 crc kubenswrapper[5133]: I1121 14:56:32.270872 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac"} err="failed to get container status \"0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac\": rpc error: code = NotFound desc = could not find container \"0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac\": container with ID starting with 0932e2873ef56ce7e61b6804fb7dfa6ddd8a639af1817cc92d0f40bfa648a8ac not found: ID does not exist" Nov 21 14:56:32 crc 
kubenswrapper[5133]: I1121 14:56:32.270900 5133 scope.go:117] "RemoveContainer" containerID="ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a" Nov 21 14:56:32 crc kubenswrapper[5133]: E1121 14:56:32.271650 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a\": container with ID starting with ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a not found: ID does not exist" containerID="ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a" Nov 21 14:56:32 crc kubenswrapper[5133]: I1121 14:56:32.271680 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a"} err="failed to get container status \"ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a\": rpc error: code = NotFound desc = could not find container \"ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a\": container with ID starting with ac24a548d42e20f485fb6223a330e96324febd432bcb6016637ffb22102b901a not found: ID does not exist" Nov 21 14:56:32 crc kubenswrapper[5133]: I1121 14:56:32.271700 5133 scope.go:117] "RemoveContainer" containerID="74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67" Nov 21 14:56:32 crc kubenswrapper[5133]: E1121 14:56:32.271974 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67\": container with ID starting with 74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67 not found: ID does not exist" containerID="74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67" Nov 21 14:56:32 crc kubenswrapper[5133]: I1121 14:56:32.272027 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67"} err="failed to get container status \"74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67\": rpc error: code = NotFound desc = could not find container \"74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67\": container with ID starting with 74850da1739f87c89fdd6b01ea9ef64f7b28eed1da4f64cfb9595ba47cf56b67 not found: ID does not exist" Nov 21 14:56:32 crc kubenswrapper[5133]: I1121 14:56:32.469758 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" path="/var/lib/kubelet/pods/8b8f670c-0078-41b5-b04f-3e31a478f4f7/volumes" Nov 21 14:56:34 crc kubenswrapper[5133]: I1121 14:56:34.458338 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:56:34 crc kubenswrapper[5133]: E1121 14:56:34.459274 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:56:42 crc kubenswrapper[5133]: I1121 14:56:42.401055 5133 scope.go:117] "RemoveContainer" containerID="bdd71fb7ddc6251ba22a8e7c91f642a797a40bb264e124d59c897d45aff00ef5" 
Nov 21 14:56:45 crc kubenswrapper[5133]: I1121 14:56:45.457766 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:56:45 crc kubenswrapper[5133]: E1121 14:56:45.458594 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:56:57 crc kubenswrapper[5133]: I1121 14:56:57.458617 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:56:57 crc kubenswrapper[5133]: E1121 14:56:57.459539 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:57:11 crc kubenswrapper[5133]: I1121 14:57:11.458377 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:57:11 crc kubenswrapper[5133]: E1121 14:57:11.459299 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:57:22 crc kubenswrapper[5133]: I1121 14:57:22.464467 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:57:22 crc kubenswrapper[5133]: E1121 14:57:22.465831 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:57:34 crc kubenswrapper[5133]: I1121 14:57:34.457835 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:57:34 crc kubenswrapper[5133]: E1121 14:57:34.460071 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.676149 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-h4kzz"] Nov 21 14:57:36 crc 
kubenswrapper[5133]: E1121 14:57:36.676865 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerName="extract-content" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.676880 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerName="extract-content" Nov 21 14:57:36 crc kubenswrapper[5133]: E1121 14:57:36.676906 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerName="extract-utilities" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.676915 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerName="extract-utilities" Nov 21 14:57:36 crc kubenswrapper[5133]: E1121 14:57:36.676934 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerName="registry-server" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.676944 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerName="registry-server" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.677196 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b8f670c-0078-41b5-b04f-3e31a478f4f7" containerName="registry-server" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.679062 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.686615 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h4kzz"] Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.710444 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-utilities\") pod \"community-operators-h4kzz\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.710538 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4qrk\" (UniqueName: \"kubernetes.io/projected/832f8939-a3a9-4838-8c61-e5ec66d048fb-kube-api-access-g4qrk\") pod \"community-operators-h4kzz\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.710574 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-catalog-content\") pod \"community-operators-h4kzz\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.812315 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-utilities\") pod \"community-operators-h4kzz\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.812392 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4qrk\" 
(UniqueName: \"kubernetes.io/projected/832f8939-a3a9-4838-8c61-e5ec66d048fb-kube-api-access-g4qrk\") pod \"community-operators-h4kzz\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.812422 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-catalog-content\") pod \"community-operators-h4kzz\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.812815 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-utilities\") pod \"community-operators-h4kzz\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.812875 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-catalog-content\") pod \"community-operators-h4kzz\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.836723 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4qrk\" (UniqueName: \"kubernetes.io/projected/832f8939-a3a9-4838-8c61-e5ec66d048fb-kube-api-access-g4qrk\") pod \"community-operators-h4kzz\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:36 crc kubenswrapper[5133]: I1121 14:57:36.998647 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:38 crc kubenswrapper[5133]: I1121 14:57:38.071795 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h4kzz"] Nov 21 14:57:38 crc kubenswrapper[5133]: I1121 14:57:38.426951 5133 generic.go:334] "Generic (PLEG): container finished" podID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerID="4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1" exitCode=0 Nov 21 14:57:38 crc kubenswrapper[5133]: I1121 14:57:38.427018 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h4kzz" event={"ID":"832f8939-a3a9-4838-8c61-e5ec66d048fb","Type":"ContainerDied","Data":"4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1"} Nov 21 14:57:38 crc kubenswrapper[5133]: I1121 14:57:38.427047 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h4kzz" event={"ID":"832f8939-a3a9-4838-8c61-e5ec66d048fb","Type":"ContainerStarted","Data":"1a7d8b494134e63b426374d73af3a47f31b7750de93cdda71ea99b0b724824eb"} Nov 21 14:57:40 crc kubenswrapper[5133]: I1121 14:57:40.445876 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h4kzz" event={"ID":"832f8939-a3a9-4838-8c61-e5ec66d048fb","Type":"ContainerStarted","Data":"f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735"} Nov 21 14:57:42 crc kubenswrapper[5133]: I1121 14:57:42.466591 5133 generic.go:334] "Generic (PLEG): container finished" podID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerID="f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735" exitCode=0 Nov 21 14:57:42 crc kubenswrapper[5133]: I1121 14:57:42.469950 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h4kzz" event={"ID":"832f8939-a3a9-4838-8c61-e5ec66d048fb","Type":"ContainerDied","Data":"f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735"} Nov 21 14:57:43 crc kubenswrapper[5133]: I1121 14:57:43.480913 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h4kzz" event={"ID":"832f8939-a3a9-4838-8c61-e5ec66d048fb","Type":"ContainerStarted","Data":"4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc"} Nov 21 14:57:43 crc kubenswrapper[5133]: I1121 14:57:43.506045 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-h4kzz" podStartSLOduration=2.873620569 podStartE2EDuration="7.506022584s" podCreationTimestamp="2025-11-21 14:57:36 +0000 UTC" firstStartedPulling="2025-11-21 14:57:38.42850779 +0000 UTC m=+4518.226340038" lastFinishedPulling="2025-11-21 14:57:43.060909805 +0000 UTC m=+4522.858742053" observedRunningTime="2025-11-21 14:57:43.504598556 +0000 UTC m=+4523.302430814" watchObservedRunningTime="2025-11-21 14:57:43.506022584 +0000 UTC m=+4523.303854852" Nov 21 14:57:46 crc kubenswrapper[5133]: I1121 14:57:46.999125 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:47 crc kubenswrapper[5133]: I1121 14:57:46.999882 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:47 crc kubenswrapper[5133]: I1121 14:57:47.091370 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:49 crc kubenswrapper[5133]: I1121 14:57:49.457325 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:57:49 crc kubenswrapper[5133]: E1121 14:57:49.457924 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:57:57 crc kubenswrapper[5133]: I1121 14:57:57.092764 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:57 crc kubenswrapper[5133]: I1121 14:57:57.156325 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h4kzz"] Nov 21 14:57:57 crc kubenswrapper[5133]: I1121 14:57:57.628168 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-h4kzz" podUID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerName="registry-server" containerID="cri-o://4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc" gracePeriod=2 Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.176588 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.263860 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-catalog-content\") pod \"832f8939-a3a9-4838-8c61-e5ec66d048fb\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.264013 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4qrk\" (UniqueName: \"kubernetes.io/projected/832f8939-a3a9-4838-8c61-e5ec66d048fb-kube-api-access-g4qrk\") pod \"832f8939-a3a9-4838-8c61-e5ec66d048fb\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.264273 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-utilities\") pod \"832f8939-a3a9-4838-8c61-e5ec66d048fb\" (UID: \"832f8939-a3a9-4838-8c61-e5ec66d048fb\") " Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.265347 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-utilities" (OuterVolumeSpecName: "utilities") pod "832f8939-a3a9-4838-8c61-e5ec66d048fb" (UID: "832f8939-a3a9-4838-8c61-e5ec66d048fb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.283314 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/832f8939-a3a9-4838-8c61-e5ec66d048fb-kube-api-access-g4qrk" (OuterVolumeSpecName: "kube-api-access-g4qrk") pod "832f8939-a3a9-4838-8c61-e5ec66d048fb" (UID: "832f8939-a3a9-4838-8c61-e5ec66d048fb"). 
InnerVolumeSpecName "kube-api-access-g4qrk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.316389 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "832f8939-a3a9-4838-8c61-e5ec66d048fb" (UID: "832f8939-a3a9-4838-8c61-e5ec66d048fb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.367642 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.367718 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/832f8939-a3a9-4838-8c61-e5ec66d048fb-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.367743 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4qrk\" (UniqueName: \"kubernetes.io/projected/832f8939-a3a9-4838-8c61-e5ec66d048fb-kube-api-access-g4qrk\") on node \"crc\" DevicePath \"\"" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.638628 5133 generic.go:334] "Generic (PLEG): container finished" podID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerID="4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc" exitCode=0 Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.639507 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h4kzz" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.639532 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h4kzz" event={"ID":"832f8939-a3a9-4838-8c61-e5ec66d048fb","Type":"ContainerDied","Data":"4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc"} Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.639826 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h4kzz" event={"ID":"832f8939-a3a9-4838-8c61-e5ec66d048fb","Type":"ContainerDied","Data":"1a7d8b494134e63b426374d73af3a47f31b7750de93cdda71ea99b0b724824eb"} Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.639863 5133 scope.go:117] "RemoveContainer" containerID="4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.666793 5133 scope.go:117] "RemoveContainer" containerID="f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.669516 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h4kzz"] Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.678506 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-h4kzz"] Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.695941 5133 scope.go:117] "RemoveContainer" containerID="4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.754433 5133 scope.go:117] "RemoveContainer" containerID="4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc" Nov 21 14:57:58 crc 
kubenswrapper[5133]: E1121 14:57:58.754884 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc\": container with ID starting with 4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc not found: ID does not exist" containerID="4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.754930 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc"} err="failed to get container status \"4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc\": rpc error: code = NotFound desc = could not find container \"4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc\": container with ID starting with 4c2360f9ba81d8fbe84b601b7bea61e6fd9b15155385e8cf476abf84a0b361dc not found: ID does not exist" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.754958 5133 scope.go:117] "RemoveContainer" containerID="f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735" Nov 21 14:57:58 crc kubenswrapper[5133]: E1121 14:57:58.755344 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735\": container with ID starting with f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735 not found: ID does not exist" containerID="f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.755378 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735"} err="failed to get container status \"f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735\": rpc error: code = NotFound desc = could not find container \"f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735\": container with ID starting with f02483a0dd5d72b3f91c4c1920102369263588f39d8885e14337b5d578f7e735 not found: ID does not exist" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.755397 5133 scope.go:117] "RemoveContainer" containerID="4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1" Nov 21 14:57:58 crc kubenswrapper[5133]: E1121 14:57:58.755682 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1\": container with ID starting with 4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1 not found: ID does not exist" containerID="4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1" Nov 21 14:57:58 crc kubenswrapper[5133]: I1121 14:57:58.755710 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1"} err="failed to get container status \"4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1\": rpc error: code = NotFound desc = could not find container \"4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1\": container with ID starting with 4509ee0b91e60e0cb4281db8ac5ea7ce532bca419dc9f0db916173a62f8eeee1 not found: ID does not exist" Nov 21 14:58:00 crc kubenswrapper[5133]: 
I1121 14:58:00.471992 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="832f8939-a3a9-4838-8c61-e5ec66d048fb" path="/var/lib/kubelet/pods/832f8939-a3a9-4838-8c61-e5ec66d048fb/volumes" Nov 21 14:58:01 crc kubenswrapper[5133]: I1121 14:58:01.457946 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:58:01 crc kubenswrapper[5133]: E1121 14:58:01.458502 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:58:14 crc kubenswrapper[5133]: I1121 14:58:14.458187 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:58:14 crc kubenswrapper[5133]: E1121 14:58:14.458970 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:58:25 crc kubenswrapper[5133]: I1121 14:58:25.457877 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:58:25 crc kubenswrapper[5133]: E1121 14:58:25.459340 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:58:37 crc kubenswrapper[5133]: I1121 14:58:37.457788 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:58:37 crc kubenswrapper[5133]: E1121 14:58:37.459177 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:58:48 crc kubenswrapper[5133]: I1121 14:58:48.459510 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:58:48 crc kubenswrapper[5133]: E1121 14:58:48.460357 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" 
podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:59:03 crc kubenswrapper[5133]: I1121 14:59:03.458789 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:59:03 crc kubenswrapper[5133]: E1121 14:59:03.459571 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:59:18 crc kubenswrapper[5133]: I1121 14:59:18.459071 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:59:18 crc kubenswrapper[5133]: E1121 14:59:18.460230 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:59:33 crc kubenswrapper[5133]: I1121 14:59:33.458900 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:59:33 crc kubenswrapper[5133]: E1121 14:59:33.460499 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 14:59:48 crc kubenswrapper[5133]: I1121 14:59:48.458357 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 14:59:48 crc kubenswrapper[5133]: E1121 14:59:48.459067 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.140639 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl"] Nov 21 15:00:00 crc kubenswrapper[5133]: E1121 15:00:00.141706 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerName="registry-server" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.141724 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerName="registry-server" Nov 21 15:00:00 crc kubenswrapper[5133]: E1121 15:00:00.141743 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerName="extract-content" Nov 21 15:00:00 crc 
kubenswrapper[5133]: I1121 15:00:00.141751 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerName="extract-content" Nov 21 15:00:00 crc kubenswrapper[5133]: E1121 15:00:00.141766 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerName="extract-utilities" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.141775 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerName="extract-utilities" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.141982 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="832f8939-a3a9-4838-8c61-e5ec66d048fb" containerName="registry-server" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.142799 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.145874 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.147050 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.157363 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl"] Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.239602 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-secret-volume\") pod \"collect-profiles-29395620-ntjcl\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.239729 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-config-volume\") pod \"collect-profiles-29395620-ntjcl\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.239794 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fxmg\" (UniqueName: \"kubernetes.io/projected/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-kube-api-access-8fxmg\") pod \"collect-profiles-29395620-ntjcl\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.342272 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-config-volume\") pod \"collect-profiles-29395620-ntjcl\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.342361 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fxmg\" (UniqueName: 
\"kubernetes.io/projected/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-kube-api-access-8fxmg\") pod \"collect-profiles-29395620-ntjcl\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.342494 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-secret-volume\") pod \"collect-profiles-29395620-ntjcl\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.343355 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-config-volume\") pod \"collect-profiles-29395620-ntjcl\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.354116 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-secret-volume\") pod \"collect-profiles-29395620-ntjcl\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.360127 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fxmg\" (UniqueName: \"kubernetes.io/projected/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-kube-api-access-8fxmg\") pod \"collect-profiles-29395620-ntjcl\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.468122 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:00 crc kubenswrapper[5133]: I1121 15:00:00.910066 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl"] Nov 21 15:00:01 crc kubenswrapper[5133]: I1121 15:00:01.882810 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" event={"ID":"648c9cba-13d7-4f5d-a95e-b874c2a3ef62","Type":"ContainerStarted","Data":"afbc9671d28b4dd06353edf7f13cb95fff7c848c57287ceb5e800d0d56ef250c"} Nov 21 15:00:01 crc kubenswrapper[5133]: I1121 15:00:01.883752 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" event={"ID":"648c9cba-13d7-4f5d-a95e-b874c2a3ef62","Type":"ContainerStarted","Data":"653dd09527cfe112ed2143358ccdae5ee8cb08949e9c76108698e0786b0b7164"} Nov 21 15:00:02 crc kubenswrapper[5133]: I1121 15:00:02.475560 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 15:00:02 crc kubenswrapper[5133]: E1121 15:00:02.475835 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:00:02 crc kubenswrapper[5133]: I1121 15:00:02.897352 5133 generic.go:334] "Generic (PLEG): container finished" podID="648c9cba-13d7-4f5d-a95e-b874c2a3ef62" containerID="afbc9671d28b4dd06353edf7f13cb95fff7c848c57287ceb5e800d0d56ef250c" exitCode=0 Nov 21 15:00:02 crc kubenswrapper[5133]: I1121 15:00:02.897531 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" event={"ID":"648c9cba-13d7-4f5d-a95e-b874c2a3ef62","Type":"ContainerDied","Data":"afbc9671d28b4dd06353edf7f13cb95fff7c848c57287ceb5e800d0d56ef250c"} Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.691344 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.838888 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-config-volume\") pod \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.838939 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fxmg\" (UniqueName: \"kubernetes.io/projected/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-kube-api-access-8fxmg\") pod \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.839160 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-secret-volume\") pod \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\" (UID: \"648c9cba-13d7-4f5d-a95e-b874c2a3ef62\") " Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.839724 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-config-volume" (OuterVolumeSpecName: "config-volume") pod "648c9cba-13d7-4f5d-a95e-b874c2a3ef62" (UID: "648c9cba-13d7-4f5d-a95e-b874c2a3ef62"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.845178 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-kube-api-access-8fxmg" (OuterVolumeSpecName: "kube-api-access-8fxmg") pod "648c9cba-13d7-4f5d-a95e-b874c2a3ef62" (UID: "648c9cba-13d7-4f5d-a95e-b874c2a3ef62"). InnerVolumeSpecName "kube-api-access-8fxmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.856405 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "648c9cba-13d7-4f5d-a95e-b874c2a3ef62" (UID: "648c9cba-13d7-4f5d-a95e-b874c2a3ef62"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.914058 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" event={"ID":"648c9cba-13d7-4f5d-a95e-b874c2a3ef62","Type":"ContainerDied","Data":"653dd09527cfe112ed2143358ccdae5ee8cb08949e9c76108698e0786b0b7164"} Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.914110 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="653dd09527cfe112ed2143358ccdae5ee8cb08949e9c76108698e0786b0b7164" Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.914077 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl" Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.940849 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.940879 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fxmg\" (UniqueName: \"kubernetes.io/projected/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-kube-api-access-8fxmg\") on node \"crc\" DevicePath \"\"" Nov 21 15:00:04 crc kubenswrapper[5133]: I1121 15:00:04.940892 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/648c9cba-13d7-4f5d-a95e-b874c2a3ef62-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:00:05 crc kubenswrapper[5133]: I1121 15:00:05.763331 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j"] Nov 21 15:00:05 crc kubenswrapper[5133]: I1121 15:00:05.773117 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395575-xg87j"] Nov 21 15:00:06 crc kubenswrapper[5133]: I1121 15:00:06.468984 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="129a90f1-ca6c-4eb2-8130-ede8a21ac65a" path="/var/lib/kubelet/pods/129a90f1-ca6c-4eb2-8130-ede8a21ac65a/volumes" Nov 21 15:00:16 crc kubenswrapper[5133]: I1121 15:00:16.458183 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 15:00:16 crc kubenswrapper[5133]: E1121 15:00:16.459409 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:00:28 crc kubenswrapper[5133]: I1121 15:00:28.457985 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 15:00:28 crc kubenswrapper[5133]: E1121 15:00:28.459085 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:00:40 crc kubenswrapper[5133]: I1121 15:00:40.458639 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 15:00:40 crc kubenswrapper[5133]: E1121 15:00:40.459787 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" 
podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:00:42 crc kubenswrapper[5133]: I1121 15:00:42.586830 5133 scope.go:117] "RemoveContainer" containerID="3ca5536b8a3a21486a7e504b3c93b75424ef43f7812000f2b73cca6561cde7c6" Nov 21 15:00:51 crc kubenswrapper[5133]: I1121 15:00:51.458265 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 15:00:51 crc kubenswrapper[5133]: E1121 15:00:51.460448 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.181405 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29395621-24j44"] Nov 21 15:01:00 crc kubenswrapper[5133]: E1121 15:01:00.186251 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="648c9cba-13d7-4f5d-a95e-b874c2a3ef62" containerName="collect-profiles" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.186493 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="648c9cba-13d7-4f5d-a95e-b874c2a3ef62" containerName="collect-profiles" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.187291 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="648c9cba-13d7-4f5d-a95e-b874c2a3ef62" containerName="collect-profiles" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.189501 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.193742 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395621-24j44"] Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.241492 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-combined-ca-bundle\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.241569 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-fernet-keys\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.241630 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7gv5\" (UniqueName: \"kubernetes.io/projected/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-kube-api-access-c7gv5\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.241677 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-config-data\") pod \"keystone-cron-29395621-24j44\" (UID: 
\"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.344703 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7gv5\" (UniqueName: \"kubernetes.io/projected/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-kube-api-access-c7gv5\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.344818 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-config-data\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.345175 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-combined-ca-bundle\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.345421 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-fernet-keys\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.356156 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-combined-ca-bundle\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.358024 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-config-data\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.359507 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-fernet-keys\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.379809 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7gv5\" (UniqueName: \"kubernetes.io/projected/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-kube-api-access-c7gv5\") pod \"keystone-cron-29395621-24j44\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:00 crc kubenswrapper[5133]: I1121 15:01:00.513495 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:01 crc kubenswrapper[5133]: I1121 15:01:01.031800 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29395621-24j44"] Nov 21 15:01:02 crc kubenswrapper[5133]: I1121 15:01:02.553252 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395621-24j44" event={"ID":"7a15a1a4-bdab-48d4-923c-426ab5f12b1b","Type":"ContainerStarted","Data":"0d98b2001e799c8dfdedca0f445394b9d470e9d8976dd5621c7b214d98aa1610"} Nov 21 15:01:02 crc kubenswrapper[5133]: I1121 15:01:02.553681 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395621-24j44" event={"ID":"7a15a1a4-bdab-48d4-923c-426ab5f12b1b","Type":"ContainerStarted","Data":"54d3caf1ed629e378eed02541e71742abb313a6bf0b0d7bc4cd3781999c3620f"} Nov 21 15:01:02 crc kubenswrapper[5133]: I1121 15:01:02.577603 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29395621-24j44" podStartSLOduration=2.5775788459999998 podStartE2EDuration="2.577578846s" podCreationTimestamp="2025-11-21 15:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:01:02.569288544 +0000 UTC m=+4722.367120802" watchObservedRunningTime="2025-11-21 15:01:02.577578846 +0000 UTC m=+4722.375411104" Nov 21 15:01:05 crc kubenswrapper[5133]: I1121 15:01:05.458257 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 15:01:05 crc kubenswrapper[5133]: E1121 15:01:05.459466 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:01:10 crc kubenswrapper[5133]: I1121 15:01:10.660908 5133 generic.go:334] "Generic (PLEG): container finished" podID="7a15a1a4-bdab-48d4-923c-426ab5f12b1b" containerID="0d98b2001e799c8dfdedca0f445394b9d470e9d8976dd5621c7b214d98aa1610" exitCode=0 Nov 21 15:01:10 crc kubenswrapper[5133]: I1121 15:01:10.661031 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395621-24j44" event={"ID":"7a15a1a4-bdab-48d4-923c-426ab5f12b1b","Type":"ContainerDied","Data":"0d98b2001e799c8dfdedca0f445394b9d470e9d8976dd5621c7b214d98aa1610"} Nov 21 15:01:10 crc kubenswrapper[5133]: I1121 15:01:10.895432 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qh8l6"] Nov 21 15:01:10 crc kubenswrapper[5133]: I1121 15:01:10.898226 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:10 crc kubenswrapper[5133]: I1121 15:01:10.906825 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qh8l6"] Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.083083 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-catalog-content\") pod \"certified-operators-qh8l6\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.083141 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-utilities\") pod \"certified-operators-qh8l6\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.083256 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzwjv\" (UniqueName: \"kubernetes.io/projected/059bf3a3-8449-4904-a628-35a9dc467cd3-kube-api-access-kzwjv\") pod \"certified-operators-qh8l6\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.184921 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzwjv\" (UniqueName: \"kubernetes.io/projected/059bf3a3-8449-4904-a628-35a9dc467cd3-kube-api-access-kzwjv\") pod \"certified-operators-qh8l6\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.185053 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-catalog-content\") pod \"certified-operators-qh8l6\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.185095 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-utilities\") pod \"certified-operators-qh8l6\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.185661 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-utilities\") pod \"certified-operators-qh8l6\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.185960 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-catalog-content\") pod \"certified-operators-qh8l6\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.219114 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kzwjv\" (UniqueName: \"kubernetes.io/projected/059bf3a3-8449-4904-a628-35a9dc467cd3-kube-api-access-kzwjv\") pod \"certified-operators-qh8l6\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.246546 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.771653 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qh8l6"] Nov 21 15:01:11 crc kubenswrapper[5133]: I1121 15:01:11.980504 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.119397 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-config-data\") pod \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.119483 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7gv5\" (UniqueName: \"kubernetes.io/projected/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-kube-api-access-c7gv5\") pod \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.119707 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-fernet-keys\") pod \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.119785 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-combined-ca-bundle\") pod \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\" (UID: \"7a15a1a4-bdab-48d4-923c-426ab5f12b1b\") " Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.137522 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-kube-api-access-c7gv5" (OuterVolumeSpecName: "kube-api-access-c7gv5") pod "7a15a1a4-bdab-48d4-923c-426ab5f12b1b" (UID: "7a15a1a4-bdab-48d4-923c-426ab5f12b1b"). InnerVolumeSpecName "kube-api-access-c7gv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.144213 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7a15a1a4-bdab-48d4-923c-426ab5f12b1b" (UID: "7a15a1a4-bdab-48d4-923c-426ab5f12b1b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.168406 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a15a1a4-bdab-48d4-923c-426ab5f12b1b" (UID: "7a15a1a4-bdab-48d4-923c-426ab5f12b1b"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.194316 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-config-data" (OuterVolumeSpecName: "config-data") pod "7a15a1a4-bdab-48d4-923c-426ab5f12b1b" (UID: "7a15a1a4-bdab-48d4-923c-426ab5f12b1b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.223816 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.223863 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7gv5\" (UniqueName: \"kubernetes.io/projected/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-kube-api-access-c7gv5\") on node \"crc\" DevicePath \"\"" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.223874 5133 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.223886 5133 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a15a1a4-bdab-48d4-923c-426ab5f12b1b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.685108 5133 generic.go:334] "Generic (PLEG): container finished" podID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerID="df6fd3b705176cb2342b620ee76abeb7e8a563a0696f1ea57930778ff7270bfc" exitCode=0 Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.685474 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qh8l6" event={"ID":"059bf3a3-8449-4904-a628-35a9dc467cd3","Type":"ContainerDied","Data":"df6fd3b705176cb2342b620ee76abeb7e8a563a0696f1ea57930778ff7270bfc"} Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.685514 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qh8l6" event={"ID":"059bf3a3-8449-4904-a628-35a9dc467cd3","Type":"ContainerStarted","Data":"075c73c2d6973d883926709aa08873a8822d33151ad714ba2e0c7a85fc90970a"} Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.688653 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.691068 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29395621-24j44" Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.690909 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29395621-24j44" event={"ID":"7a15a1a4-bdab-48d4-923c-426ab5f12b1b","Type":"ContainerDied","Data":"54d3caf1ed629e378eed02541e71742abb313a6bf0b0d7bc4cd3781999c3620f"} Nov 21 15:01:12 crc kubenswrapper[5133]: I1121 15:01:12.691990 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54d3caf1ed629e378eed02541e71742abb313a6bf0b0d7bc4cd3781999c3620f" Nov 21 15:01:15 crc kubenswrapper[5133]: I1121 15:01:15.722222 5133 generic.go:334] "Generic (PLEG): container finished" podID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerID="96b3a0e11d4500acc4f143829098b09fbbafccc582d8c6a88b46ef77b79cb5f3" exitCode=0 Nov 21 15:01:15 crc kubenswrapper[5133]: I1121 15:01:15.722434 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qh8l6" event={"ID":"059bf3a3-8449-4904-a628-35a9dc467cd3","Type":"ContainerDied","Data":"96b3a0e11d4500acc4f143829098b09fbbafccc582d8c6a88b46ef77b79cb5f3"} Nov 21 15:01:16 crc kubenswrapper[5133]: I1121 15:01:16.458860 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 15:01:16 crc kubenswrapper[5133]: E1121 15:01:16.459765 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:01:17 crc kubenswrapper[5133]: I1121 15:01:17.750309 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qh8l6" event={"ID":"059bf3a3-8449-4904-a628-35a9dc467cd3","Type":"ContainerStarted","Data":"5a8c82b130c9ec5d18c9772cbd4563426b4f296fa002bb7f16437de7b636c019"} Nov 21 15:01:17 crc kubenswrapper[5133]: I1121 15:01:17.780732 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qh8l6" podStartSLOduration=3.327394063 podStartE2EDuration="7.780711589s" podCreationTimestamp="2025-11-21 15:01:10 +0000 UTC" firstStartedPulling="2025-11-21 15:01:12.688217488 +0000 UTC m=+4732.486049776" lastFinishedPulling="2025-11-21 15:01:17.141535014 +0000 UTC m=+4736.939367302" observedRunningTime="2025-11-21 15:01:17.770540117 +0000 UTC m=+4737.568372375" watchObservedRunningTime="2025-11-21 15:01:17.780711589 +0000 UTC m=+4737.578543837" Nov 21 15:01:21 crc kubenswrapper[5133]: I1121 15:01:21.247560 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:21 crc kubenswrapper[5133]: I1121 15:01:21.248727 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:21 crc kubenswrapper[5133]: I1121 15:01:21.318242 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:30 crc kubenswrapper[5133]: I1121 15:01:30.460911 5133 scope.go:117] "RemoveContainer" 
containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 15:01:30 crc kubenswrapper[5133]: I1121 15:01:30.893312 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"1c97c65cd3efcaaa6482f47f143f5b4d2eeff6f58f86078a941ae91a1807c6d0"} Nov 21 15:01:31 crc kubenswrapper[5133]: I1121 15:01:31.310657 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:31 crc kubenswrapper[5133]: I1121 15:01:31.372593 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qh8l6"] Nov 21 15:01:31 crc kubenswrapper[5133]: I1121 15:01:31.901698 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qh8l6" podUID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerName="registry-server" containerID="cri-o://5a8c82b130c9ec5d18c9772cbd4563426b4f296fa002bb7f16437de7b636c019" gracePeriod=2 Nov 21 15:01:32 crc kubenswrapper[5133]: I1121 15:01:32.935262 5133 generic.go:334] "Generic (PLEG): container finished" podID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerID="5a8c82b130c9ec5d18c9772cbd4563426b4f296fa002bb7f16437de7b636c019" exitCode=0 Nov 21 15:01:32 crc kubenswrapper[5133]: I1121 15:01:32.935354 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qh8l6" event={"ID":"059bf3a3-8449-4904-a628-35a9dc467cd3","Type":"ContainerDied","Data":"5a8c82b130c9ec5d18c9772cbd4563426b4f296fa002bb7f16437de7b636c019"} Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.107313 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.193382 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzwjv\" (UniqueName: \"kubernetes.io/projected/059bf3a3-8449-4904-a628-35a9dc467cd3-kube-api-access-kzwjv\") pod \"059bf3a3-8449-4904-a628-35a9dc467cd3\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.193731 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-catalog-content\") pod \"059bf3a3-8449-4904-a628-35a9dc467cd3\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.193759 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-utilities\") pod \"059bf3a3-8449-4904-a628-35a9dc467cd3\" (UID: \"059bf3a3-8449-4904-a628-35a9dc467cd3\") " Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.195558 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-utilities" (OuterVolumeSpecName: "utilities") pod "059bf3a3-8449-4904-a628-35a9dc467cd3" (UID: "059bf3a3-8449-4904-a628-35a9dc467cd3"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.205612 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/059bf3a3-8449-4904-a628-35a9dc467cd3-kube-api-access-kzwjv" (OuterVolumeSpecName: "kube-api-access-kzwjv") pod "059bf3a3-8449-4904-a628-35a9dc467cd3" (UID: "059bf3a3-8449-4904-a628-35a9dc467cd3"). InnerVolumeSpecName "kube-api-access-kzwjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.264415 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "059bf3a3-8449-4904-a628-35a9dc467cd3" (UID: "059bf3a3-8449-4904-a628-35a9dc467cd3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.295870 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzwjv\" (UniqueName: \"kubernetes.io/projected/059bf3a3-8449-4904-a628-35a9dc467cd3-kube-api-access-kzwjv\") on node \"crc\" DevicePath \"\"" Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.295923 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.295938 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/059bf3a3-8449-4904-a628-35a9dc467cd3-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.954579 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qh8l6" event={"ID":"059bf3a3-8449-4904-a628-35a9dc467cd3","Type":"ContainerDied","Data":"075c73c2d6973d883926709aa08873a8822d33151ad714ba2e0c7a85fc90970a"} Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.954653 5133 scope.go:117] "RemoveContainer" containerID="5a8c82b130c9ec5d18c9772cbd4563426b4f296fa002bb7f16437de7b636c019" Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.954683 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qh8l6" Nov 21 15:01:33 crc kubenswrapper[5133]: I1121 15:01:33.991785 5133 scope.go:117] "RemoveContainer" containerID="96b3a0e11d4500acc4f143829098b09fbbafccc582d8c6a88b46ef77b79cb5f3" Nov 21 15:01:34 crc kubenswrapper[5133]: I1121 15:01:33.997904 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qh8l6"] Nov 21 15:01:34 crc kubenswrapper[5133]: I1121 15:01:34.004692 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qh8l6"] Nov 21 15:01:34 crc kubenswrapper[5133]: I1121 15:01:34.319294 5133 scope.go:117] "RemoveContainer" containerID="df6fd3b705176cb2342b620ee76abeb7e8a563a0696f1ea57930778ff7270bfc" Nov 21 15:01:34 crc kubenswrapper[5133]: I1121 15:01:34.468687 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="059bf3a3-8449-4904-a628-35a9dc467cd3" path="/var/lib/kubelet/pods/059bf3a3-8449-4904-a628-35a9dc467cd3/volumes" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.809695 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-96q5w"] Nov 21 15:02:42 crc kubenswrapper[5133]: E1121 15:02:42.811049 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerName="extract-content" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.811067 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerName="extract-content" Nov 21 15:02:42 crc kubenswrapper[5133]: E1121 15:02:42.811085 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerName="extract-utilities" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.811091 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerName="extract-utilities" Nov 21 15:02:42 crc kubenswrapper[5133]: E1121 15:02:42.811104 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerName="registry-server" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.811111 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerName="registry-server" Nov 21 15:02:42 crc kubenswrapper[5133]: E1121 15:02:42.811120 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a15a1a4-bdab-48d4-923c-426ab5f12b1b" containerName="keystone-cron" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.811125 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a15a1a4-bdab-48d4-923c-426ab5f12b1b" containerName="keystone-cron" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.811378 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="059bf3a3-8449-4904-a628-35a9dc467cd3" containerName="registry-server" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.811420 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a15a1a4-bdab-48d4-923c-426ab5f12b1b" containerName="keystone-cron" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.814280 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.826010 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-96q5w"] Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.990708 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-catalog-content\") pod \"redhat-marketplace-96q5w\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.990796 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgnk4\" (UniqueName: \"kubernetes.io/projected/9935133a-95e4-4fa7-aa6a-c118eda51908-kube-api-access-jgnk4\") pod \"redhat-marketplace-96q5w\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:42 crc kubenswrapper[5133]: I1121 15:02:42.990952 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-utilities\") pod \"redhat-marketplace-96q5w\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:43 crc kubenswrapper[5133]: I1121 15:02:43.093283 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-catalog-content\") pod \"redhat-marketplace-96q5w\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:43 crc kubenswrapper[5133]: I1121 15:02:43.093481 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgnk4\" (UniqueName: \"kubernetes.io/projected/9935133a-95e4-4fa7-aa6a-c118eda51908-kube-api-access-jgnk4\") pod \"redhat-marketplace-96q5w\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:43 crc kubenswrapper[5133]: I1121 15:02:43.093563 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-utilities\") pod \"redhat-marketplace-96q5w\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:43 crc kubenswrapper[5133]: I1121 15:02:43.093938 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-catalog-content\") pod \"redhat-marketplace-96q5w\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:43 crc kubenswrapper[5133]: I1121 15:02:43.094401 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-utilities\") pod \"redhat-marketplace-96q5w\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:43 crc kubenswrapper[5133]: I1121 15:02:43.135370 5133 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-jgnk4\" (UniqueName: \"kubernetes.io/projected/9935133a-95e4-4fa7-aa6a-c118eda51908-kube-api-access-jgnk4\") pod \"redhat-marketplace-96q5w\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:43 crc kubenswrapper[5133]: I1121 15:02:43.137534 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:43 crc kubenswrapper[5133]: I1121 15:02:43.744341 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-96q5w"] Nov 21 15:02:43 crc kubenswrapper[5133]: I1121 15:02:43.768883 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-96q5w" event={"ID":"9935133a-95e4-4fa7-aa6a-c118eda51908","Type":"ContainerStarted","Data":"6b840cf88f0e50e19a53eacdfaeccb5df0cd362551a48d5f404df2c39728df7b"} Nov 21 15:02:44 crc kubenswrapper[5133]: I1121 15:02:44.778441 5133 generic.go:334] "Generic (PLEG): container finished" podID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerID="bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581" exitCode=0 Nov 21 15:02:44 crc kubenswrapper[5133]: I1121 15:02:44.778559 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-96q5w" event={"ID":"9935133a-95e4-4fa7-aa6a-c118eda51908","Type":"ContainerDied","Data":"bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581"} Nov 21 15:02:47 crc kubenswrapper[5133]: I1121 15:02:47.807080 5133 generic.go:334] "Generic (PLEG): container finished" podID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerID="8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f" exitCode=0 Nov 21 15:02:47 crc kubenswrapper[5133]: I1121 15:02:47.807407 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-96q5w" event={"ID":"9935133a-95e4-4fa7-aa6a-c118eda51908","Type":"ContainerDied","Data":"8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f"} Nov 21 15:02:52 crc kubenswrapper[5133]: I1121 15:02:50.849260 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-96q5w" event={"ID":"9935133a-95e4-4fa7-aa6a-c118eda51908","Type":"ContainerStarted","Data":"8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64"} Nov 21 15:02:52 crc kubenswrapper[5133]: I1121 15:02:50.883140 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-96q5w" podStartSLOduration=3.713282861 podStartE2EDuration="8.883117769s" podCreationTimestamp="2025-11-21 15:02:42 +0000 UTC" firstStartedPulling="2025-11-21 15:02:44.780099753 +0000 UTC m=+4824.577932001" lastFinishedPulling="2025-11-21 15:02:49.949934651 +0000 UTC m=+4829.747766909" observedRunningTime="2025-11-21 15:02:50.875598518 +0000 UTC m=+4830.673430786" watchObservedRunningTime="2025-11-21 15:02:50.883117769 +0000 UTC m=+4830.680950037" Nov 21 15:02:53 crc kubenswrapper[5133]: I1121 15:02:53.138215 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:53 crc kubenswrapper[5133]: I1121 15:02:53.138803 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:02:53 crc kubenswrapper[5133]: I1121 15:02:53.203797 5133 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.201146 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.260777 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-96q5w"] Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.261071 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-96q5w" podUID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerName="registry-server" containerID="cri-o://8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64" gracePeriod=2 Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.884122 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.978621 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgnk4\" (UniqueName: \"kubernetes.io/projected/9935133a-95e4-4fa7-aa6a-c118eda51908-kube-api-access-jgnk4\") pod \"9935133a-95e4-4fa7-aa6a-c118eda51908\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.978859 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-utilities\") pod \"9935133a-95e4-4fa7-aa6a-c118eda51908\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.978988 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-catalog-content\") pod \"9935133a-95e4-4fa7-aa6a-c118eda51908\" (UID: \"9935133a-95e4-4fa7-aa6a-c118eda51908\") " Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.979869 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-utilities" (OuterVolumeSpecName: "utilities") pod "9935133a-95e4-4fa7-aa6a-c118eda51908" (UID: "9935133a-95e4-4fa7-aa6a-c118eda51908"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.984501 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9935133a-95e4-4fa7-aa6a-c118eda51908-kube-api-access-jgnk4" (OuterVolumeSpecName: "kube-api-access-jgnk4") pod "9935133a-95e4-4fa7-aa6a-c118eda51908" (UID: "9935133a-95e4-4fa7-aa6a-c118eda51908"). InnerVolumeSpecName "kube-api-access-jgnk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:03:03 crc kubenswrapper[5133]: I1121 15:03:03.998785 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9935133a-95e4-4fa7-aa6a-c118eda51908" (UID: "9935133a-95e4-4fa7-aa6a-c118eda51908"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.081949 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgnk4\" (UniqueName: \"kubernetes.io/projected/9935133a-95e4-4fa7-aa6a-c118eda51908-kube-api-access-jgnk4\") on node \"crc\" DevicePath \"\"" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.082012 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.082027 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9935133a-95e4-4fa7-aa6a-c118eda51908-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.229164 5133 generic.go:334] "Generic (PLEG): container finished" podID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerID="8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64" exitCode=0 Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.229224 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-96q5w" event={"ID":"9935133a-95e4-4fa7-aa6a-c118eda51908","Type":"ContainerDied","Data":"8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64"} Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.229274 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-96q5w" event={"ID":"9935133a-95e4-4fa7-aa6a-c118eda51908","Type":"ContainerDied","Data":"6b840cf88f0e50e19a53eacdfaeccb5df0cd362551a48d5f404df2c39728df7b"} Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.229306 5133 scope.go:117] "RemoveContainer" containerID="8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.230759 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-96q5w" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.257683 5133 scope.go:117] "RemoveContainer" containerID="8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.273573 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-96q5w"] Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.281207 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-96q5w"] Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.297371 5133 scope.go:117] "RemoveContainer" containerID="bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.325891 5133 scope.go:117] "RemoveContainer" containerID="8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64" Nov 21 15:03:04 crc kubenswrapper[5133]: E1121 15:03:04.326459 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64\": container with ID starting with 8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64 not found: ID does not exist" containerID="8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.326531 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64"} err="failed to get container status \"8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64\": rpc error: code = NotFound desc = could not find container \"8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64\": container with ID starting with 8e710e8225c654e6a31bca1ebd0e655555cee7092f40de86f93fcc066c82fe64 not found: ID does not exist" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.326592 5133 scope.go:117] "RemoveContainer" containerID="8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f" Nov 21 15:03:04 crc kubenswrapper[5133]: E1121 15:03:04.326978 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f\": container with ID starting with 8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f not found: ID does not exist" containerID="8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.327136 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f"} err="failed to get container status \"8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f\": rpc error: code = NotFound desc = could not find container \"8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f\": container with ID starting with 8769884e3ff737795d75dc264c6a292a1ecfd86f183ad4e400536d9bd005e37f not found: ID does not exist" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.327183 5133 scope.go:117] "RemoveContainer" containerID="bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581" Nov 21 15:03:04 crc kubenswrapper[5133]: E1121 15:03:04.327557 5133 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581\": container with ID starting with bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581 not found: ID does not exist" containerID="bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.327600 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581"} err="failed to get container status \"bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581\": rpc error: code = NotFound desc = could not find container \"bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581\": container with ID starting with bf7f754a5840905716f737b44098a192e7663d14038b4f8def5f92084002e581 not found: ID does not exist" Nov 21 15:03:04 crc kubenswrapper[5133]: I1121 15:03:04.472294 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9935133a-95e4-4fa7-aa6a-c118eda51908" path="/var/lib/kubelet/pods/9935133a-95e4-4fa7-aa6a-c118eda51908/volumes" Nov 21 15:03:53 crc kubenswrapper[5133]: I1121 15:03:53.310825 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:03:53 crc kubenswrapper[5133]: I1121 15:03:53.312535 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:04:23 crc kubenswrapper[5133]: I1121 15:04:23.310658 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:04:23 crc kubenswrapper[5133]: I1121 15:04:23.311338 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:04:53 crc kubenswrapper[5133]: I1121 15:04:53.311038 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:04:53 crc kubenswrapper[5133]: I1121 15:04:53.311743 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:04:53 crc kubenswrapper[5133]: I1121 15:04:53.311810 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 15:04:53 crc kubenswrapper[5133]: I1121 15:04:53.312903 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c97c65cd3efcaaa6482f47f143f5b4d2eeff6f58f86078a941ae91a1807c6d0"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:04:53 crc kubenswrapper[5133]: I1121 15:04:53.313064 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://1c97c65cd3efcaaa6482f47f143f5b4d2eeff6f58f86078a941ae91a1807c6d0" gracePeriod=600 Nov 21 15:04:54 crc kubenswrapper[5133]: I1121 15:04:54.397635 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="1c97c65cd3efcaaa6482f47f143f5b4d2eeff6f58f86078a941ae91a1807c6d0" exitCode=0 Nov 21 15:04:54 crc kubenswrapper[5133]: I1121 15:04:54.397672 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"1c97c65cd3efcaaa6482f47f143f5b4d2eeff6f58f86078a941ae91a1807c6d0"} Nov 21 15:04:54 crc kubenswrapper[5133]: I1121 15:04:54.398406 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78"} Nov 21 15:04:54 crc kubenswrapper[5133]: I1121 15:04:54.398442 5133 scope.go:117] "RemoveContainer" containerID="804c4033e86e60d9085f68928ef5d8bfdfa7e39b4aaf598c9d7f7fcd731c283e" Nov 21 15:06:53 crc kubenswrapper[5133]: I1121 15:06:53.310590 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:06:53 crc kubenswrapper[5133]: I1121 15:06:53.311244 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:07:23 crc kubenswrapper[5133]: I1121 15:07:23.310832 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:07:23 crc kubenswrapper[5133]: I1121 15:07:23.311502 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:07:29 crc 
kubenswrapper[5133]: I1121 15:07:29.780889 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4phv5"] Nov 21 15:07:29 crc kubenswrapper[5133]: E1121 15:07:29.781611 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerName="extract-utilities" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.781622 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerName="extract-utilities" Nov 21 15:07:29 crc kubenswrapper[5133]: E1121 15:07:29.781657 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerName="extract-content" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.781663 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerName="extract-content" Nov 21 15:07:29 crc kubenswrapper[5133]: E1121 15:07:29.781676 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerName="registry-server" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.781684 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerName="registry-server" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.781856 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="9935133a-95e4-4fa7-aa6a-c118eda51908" containerName="registry-server" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.783735 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.814327 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4phv5"] Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.863282 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-utilities\") pod \"redhat-operators-4phv5\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.863591 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-catalog-content\") pod \"redhat-operators-4phv5\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.863613 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s42lm\" (UniqueName: \"kubernetes.io/projected/4872a1ac-b218-41f6-871e-b79c9768181a-kube-api-access-s42lm\") pod \"redhat-operators-4phv5\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.965209 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s42lm\" (UniqueName: \"kubernetes.io/projected/4872a1ac-b218-41f6-871e-b79c9768181a-kube-api-access-s42lm\") pod \"redhat-operators-4phv5\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:29 
crc kubenswrapper[5133]: I1121 15:07:29.965266 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-catalog-content\") pod \"redhat-operators-4phv5\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.965424 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-utilities\") pod \"redhat-operators-4phv5\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.966253 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-utilities\") pod \"redhat-operators-4phv5\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.966839 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-catalog-content\") pod \"redhat-operators-4phv5\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:29 crc kubenswrapper[5133]: I1121 15:07:29.998238 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s42lm\" (UniqueName: \"kubernetes.io/projected/4872a1ac-b218-41f6-871e-b79c9768181a-kube-api-access-s42lm\") pod \"redhat-operators-4phv5\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:30 crc kubenswrapper[5133]: I1121 15:07:30.120123 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:30 crc kubenswrapper[5133]: I1121 15:07:30.652638 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4phv5"] Nov 21 15:07:31 crc kubenswrapper[5133]: I1121 15:07:31.339774 5133 generic.go:334] "Generic (PLEG): container finished" podID="4872a1ac-b218-41f6-871e-b79c9768181a" containerID="aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496" exitCode=0 Nov 21 15:07:31 crc kubenswrapper[5133]: I1121 15:07:31.339930 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4phv5" event={"ID":"4872a1ac-b218-41f6-871e-b79c9768181a","Type":"ContainerDied","Data":"aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496"} Nov 21 15:07:31 crc kubenswrapper[5133]: I1121 15:07:31.340128 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4phv5" event={"ID":"4872a1ac-b218-41f6-871e-b79c9768181a","Type":"ContainerStarted","Data":"f91a87adb62106e1cfcf41038fbf738f0ba1f0728c718844401a99f1d068944e"} Nov 21 15:07:31 crc kubenswrapper[5133]: I1121 15:07:31.342579 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:07:32 crc kubenswrapper[5133]: I1121 15:07:32.350539 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4phv5" event={"ID":"4872a1ac-b218-41f6-871e-b79c9768181a","Type":"ContainerStarted","Data":"ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066"} Nov 21 15:07:34 crc kubenswrapper[5133]: I1121 15:07:34.376221 5133 generic.go:334] "Generic (PLEG): container finished" podID="4872a1ac-b218-41f6-871e-b79c9768181a" containerID="ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066" exitCode=0 Nov 21 15:07:34 crc kubenswrapper[5133]: I1121 15:07:34.376785 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4phv5" event={"ID":"4872a1ac-b218-41f6-871e-b79c9768181a","Type":"ContainerDied","Data":"ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066"} Nov 21 15:07:36 crc kubenswrapper[5133]: I1121 15:07:36.404774 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4phv5" event={"ID":"4872a1ac-b218-41f6-871e-b79c9768181a","Type":"ContainerStarted","Data":"d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25"} Nov 21 15:07:36 crc kubenswrapper[5133]: I1121 15:07:36.437952 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4phv5" podStartSLOduration=3.220317008 podStartE2EDuration="7.437926274s" podCreationTimestamp="2025-11-21 15:07:29 +0000 UTC" firstStartedPulling="2025-11-21 15:07:31.342386672 +0000 UTC m=+5111.140218920" lastFinishedPulling="2025-11-21 15:07:35.559995928 +0000 UTC m=+5115.357828186" observedRunningTime="2025-11-21 15:07:36.432025337 +0000 UTC m=+5116.229857595" watchObservedRunningTime="2025-11-21 15:07:36.437926274 +0000 UTC m=+5116.235758562" Nov 21 15:07:40 crc kubenswrapper[5133]: I1121 15:07:40.121378 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:40 crc kubenswrapper[5133]: I1121 15:07:40.122068 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:41 crc 
kubenswrapper[5133]: I1121 15:07:41.168746 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4phv5" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" containerName="registry-server" probeResult="failure" output=< Nov 21 15:07:41 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 15:07:41 crc kubenswrapper[5133]: > Nov 21 15:07:50 crc kubenswrapper[5133]: I1121 15:07:50.176274 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:50 crc kubenswrapper[5133]: I1121 15:07:50.255481 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:50 crc kubenswrapper[5133]: I1121 15:07:50.427126 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4phv5"] Nov 21 15:07:51 crc kubenswrapper[5133]: I1121 15:07:51.572116 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4phv5" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" containerName="registry-server" containerID="cri-o://d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25" gracePeriod=2 Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.487540 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.566849 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-catalog-content\") pod \"4872a1ac-b218-41f6-871e-b79c9768181a\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.567077 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-utilities\") pod \"4872a1ac-b218-41f6-871e-b79c9768181a\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.567464 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s42lm\" (UniqueName: \"kubernetes.io/projected/4872a1ac-b218-41f6-871e-b79c9768181a-kube-api-access-s42lm\") pod \"4872a1ac-b218-41f6-871e-b79c9768181a\" (UID: \"4872a1ac-b218-41f6-871e-b79c9768181a\") " Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.569471 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-utilities" (OuterVolumeSpecName: "utilities") pod "4872a1ac-b218-41f6-871e-b79c9768181a" (UID: "4872a1ac-b218-41f6-871e-b79c9768181a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.577824 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4872a1ac-b218-41f6-871e-b79c9768181a-kube-api-access-s42lm" (OuterVolumeSpecName: "kube-api-access-s42lm") pod "4872a1ac-b218-41f6-871e-b79c9768181a" (UID: "4872a1ac-b218-41f6-871e-b79c9768181a"). InnerVolumeSpecName "kube-api-access-s42lm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.584444 5133 generic.go:334] "Generic (PLEG): container finished" podID="4872a1ac-b218-41f6-871e-b79c9768181a" containerID="d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25" exitCode=0 Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.584501 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4phv5" event={"ID":"4872a1ac-b218-41f6-871e-b79c9768181a","Type":"ContainerDied","Data":"d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25"} Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.584536 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4phv5" event={"ID":"4872a1ac-b218-41f6-871e-b79c9768181a","Type":"ContainerDied","Data":"f91a87adb62106e1cfcf41038fbf738f0ba1f0728c718844401a99f1d068944e"} Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.584557 5133 scope.go:117] "RemoveContainer" containerID="d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.584739 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4phv5" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.643552 5133 scope.go:117] "RemoveContainer" containerID="ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.670070 5133 scope.go:117] "RemoveContainer" containerID="aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.670401 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s42lm\" (UniqueName: \"kubernetes.io/projected/4872a1ac-b218-41f6-871e-b79c9768181a-kube-api-access-s42lm\") on node \"crc\" DevicePath \"\"" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.670432 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.697979 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4872a1ac-b218-41f6-871e-b79c9768181a" (UID: "4872a1ac-b218-41f6-871e-b79c9768181a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.772405 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4872a1ac-b218-41f6-871e-b79c9768181a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.809588 5133 scope.go:117] "RemoveContainer" containerID="d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25" Nov 21 15:07:52 crc kubenswrapper[5133]: E1121 15:07:52.810039 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25\": container with ID starting with d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25 not found: ID does not exist" containerID="d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.810079 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25"} err="failed to get container status \"d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25\": rpc error: code = NotFound desc = could not find container \"d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25\": container with ID starting with d725e7370dada9f4ab7cfd91e902065125ef3eefd3cd59fc9b892d508da1fa25 not found: ID does not exist" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.810101 5133 scope.go:117] "RemoveContainer" containerID="ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066" Nov 21 15:07:52 crc kubenswrapper[5133]: E1121 15:07:52.810293 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066\": container with ID starting with ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066 not found: ID does not exist" containerID="ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.810325 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066"} err="failed to get container status \"ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066\": rpc error: code = NotFound desc = could not find container \"ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066\": container with ID starting with ea38f302ac6797087e0215df54292646e509fe61d89fe95ec37f27aea0cad066 not found: ID does not exist" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.810346 5133 scope.go:117] "RemoveContainer" containerID="aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496" Nov 21 15:07:52 crc kubenswrapper[5133]: E1121 15:07:52.810668 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496\": container with ID starting with aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496 not found: ID does not exist" containerID="aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.810693 5133 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496"} err="failed to get container status \"aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496\": rpc error: code = NotFound desc = could not find container \"aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496\": container with ID starting with aafc524c8a554bf00961e10289972fd7164cadc8005c21b93ad6e7cdf0fef496 not found: ID does not exist" Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.939735 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4phv5"] Nov 21 15:07:52 crc kubenswrapper[5133]: I1121 15:07:52.956282 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4phv5"] Nov 21 15:07:53 crc kubenswrapper[5133]: I1121 15:07:53.311442 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:07:53 crc kubenswrapper[5133]: I1121 15:07:53.311852 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:07:53 crc kubenswrapper[5133]: I1121 15:07:53.311914 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 15:07:53 crc kubenswrapper[5133]: I1121 15:07:53.312926 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:07:53 crc kubenswrapper[5133]: I1121 15:07:53.313072 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" gracePeriod=600 Nov 21 15:07:54 crc kubenswrapper[5133]: E1121 15:07:54.205425 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:07:54 crc kubenswrapper[5133]: I1121 15:07:54.472245 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" path="/var/lib/kubelet/pods/4872a1ac-b218-41f6-871e-b79c9768181a/volumes" Nov 21 15:07:54 crc kubenswrapper[5133]: I1121 15:07:54.604658 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" 
containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" exitCode=0 Nov 21 15:07:54 crc kubenswrapper[5133]: I1121 15:07:54.604720 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78"} Nov 21 15:07:54 crc kubenswrapper[5133]: I1121 15:07:54.604792 5133 scope.go:117] "RemoveContainer" containerID="1c97c65cd3efcaaa6482f47f143f5b4d2eeff6f58f86078a941ae91a1807c6d0" Nov 21 15:07:54 crc kubenswrapper[5133]: I1121 15:07:54.605406 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:07:54 crc kubenswrapper[5133]: E1121 15:07:54.605752 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.843356 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7gzzb"] Nov 21 15:07:55 crc kubenswrapper[5133]: E1121 15:07:55.844614 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" containerName="registry-server" Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.844635 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" containerName="registry-server" Nov 21 15:07:55 crc kubenswrapper[5133]: E1121 15:07:55.844736 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" containerName="extract-utilities" Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.844745 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" containerName="extract-utilities" Nov 21 15:07:55 crc kubenswrapper[5133]: E1121 15:07:55.844757 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" containerName="extract-content" Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.844765 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" containerName="extract-content" Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.845118 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="4872a1ac-b218-41f6-871e-b79c9768181a" containerName="registry-server" Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.847637 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.863594 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7gzzb"] Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.935670 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j72pq\" (UniqueName: \"kubernetes.io/projected/ff388947-55a1-4be6-99ba-11955c5cf2b2-kube-api-access-j72pq\") pod \"community-operators-7gzzb\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.935717 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-utilities\") pod \"community-operators-7gzzb\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:55 crc kubenswrapper[5133]: I1121 15:07:55.935894 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-catalog-content\") pod \"community-operators-7gzzb\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:56 crc kubenswrapper[5133]: I1121 15:07:56.037763 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-catalog-content\") pod \"community-operators-7gzzb\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:56 crc kubenswrapper[5133]: I1121 15:07:56.037888 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j72pq\" (UniqueName: \"kubernetes.io/projected/ff388947-55a1-4be6-99ba-11955c5cf2b2-kube-api-access-j72pq\") pod \"community-operators-7gzzb\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:56 crc kubenswrapper[5133]: I1121 15:07:56.037910 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-utilities\") pod \"community-operators-7gzzb\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:56 crc kubenswrapper[5133]: I1121 15:07:56.038335 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-catalog-content\") pod \"community-operators-7gzzb\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:56 crc kubenswrapper[5133]: I1121 15:07:56.038434 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-utilities\") pod \"community-operators-7gzzb\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:56 crc kubenswrapper[5133]: I1121 15:07:56.057462 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-j72pq\" (UniqueName: \"kubernetes.io/projected/ff388947-55a1-4be6-99ba-11955c5cf2b2-kube-api-access-j72pq\") pod \"community-operators-7gzzb\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:56 crc kubenswrapper[5133]: I1121 15:07:56.184719 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:07:56 crc kubenswrapper[5133]: I1121 15:07:56.703293 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7gzzb"] Nov 21 15:07:58 crc kubenswrapper[5133]: I1121 15:07:58.183194 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7gzzb" event={"ID":"ff388947-55a1-4be6-99ba-11955c5cf2b2","Type":"ContainerStarted","Data":"502e6caed8b6b49df272e29506af26b3df651825f22baa3acf4ac7d9c49426ce"} Nov 21 15:07:59 crc kubenswrapper[5133]: I1121 15:07:59.195546 5133 generic.go:334] "Generic (PLEG): container finished" podID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerID="88e00bbce34de6a9258865b2eccf51feb84ace28f572ef066c0eded608613253" exitCode=0 Nov 21 15:07:59 crc kubenswrapper[5133]: I1121 15:07:59.196308 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7gzzb" event={"ID":"ff388947-55a1-4be6-99ba-11955c5cf2b2","Type":"ContainerDied","Data":"88e00bbce34de6a9258865b2eccf51feb84ace28f572ef066c0eded608613253"} Nov 21 15:08:01 crc kubenswrapper[5133]: I1121 15:08:01.229435 5133 generic.go:334] "Generic (PLEG): container finished" podID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerID="207e11f4528c71406011e70e6c0f52772edb9f3cc13d80f9852bed78c5010474" exitCode=0 Nov 21 15:08:01 crc kubenswrapper[5133]: I1121 15:08:01.229497 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7gzzb" event={"ID":"ff388947-55a1-4be6-99ba-11955c5cf2b2","Type":"ContainerDied","Data":"207e11f4528c71406011e70e6c0f52772edb9f3cc13d80f9852bed78c5010474"} Nov 21 15:08:02 crc kubenswrapper[5133]: I1121 15:08:02.246225 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7gzzb" event={"ID":"ff388947-55a1-4be6-99ba-11955c5cf2b2","Type":"ContainerStarted","Data":"d208458e90187dc37e6de3e6c2c6509a5d188287ca186f16703824ec081d2080"} Nov 21 15:08:02 crc kubenswrapper[5133]: I1121 15:08:02.272010 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7gzzb" podStartSLOduration=4.844409435 podStartE2EDuration="7.271972928s" podCreationTimestamp="2025-11-21 15:07:55 +0000 UTC" firstStartedPulling="2025-11-21 15:07:59.198660126 +0000 UTC m=+5138.996492404" lastFinishedPulling="2025-11-21 15:08:01.626223609 +0000 UTC m=+5141.424055897" observedRunningTime="2025-11-21 15:08:02.262362972 +0000 UTC m=+5142.060195230" watchObservedRunningTime="2025-11-21 15:08:02.271972928 +0000 UTC m=+5142.069805176" Nov 21 15:08:06 crc kubenswrapper[5133]: I1121 15:08:06.185311 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:08:06 crc kubenswrapper[5133]: I1121 15:08:06.185950 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:08:06 crc kubenswrapper[5133]: I1121 15:08:06.263682 5133 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:08:06 crc kubenswrapper[5133]: I1121 15:08:06.344780 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:08:06 crc kubenswrapper[5133]: I1121 15:08:06.512446 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7gzzb"] Nov 21 15:08:08 crc kubenswrapper[5133]: I1121 15:08:08.307249 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7gzzb" podUID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerName="registry-server" containerID="cri-o://d208458e90187dc37e6de3e6c2c6509a5d188287ca186f16703824ec081d2080" gracePeriod=2 Nov 21 15:08:08 crc kubenswrapper[5133]: I1121 15:08:08.458359 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:08:08 crc kubenswrapper[5133]: E1121 15:08:08.458959 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.321887 5133 generic.go:334] "Generic (PLEG): container finished" podID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerID="d208458e90187dc37e6de3e6c2c6509a5d188287ca186f16703824ec081d2080" exitCode=0 Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.321953 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7gzzb" event={"ID":"ff388947-55a1-4be6-99ba-11955c5cf2b2","Type":"ContainerDied","Data":"d208458e90187dc37e6de3e6c2c6509a5d188287ca186f16703824ec081d2080"} Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.322289 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7gzzb" event={"ID":"ff388947-55a1-4be6-99ba-11955c5cf2b2","Type":"ContainerDied","Data":"502e6caed8b6b49df272e29506af26b3df651825f22baa3acf4ac7d9c49426ce"} Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.322306 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="502e6caed8b6b49df272e29506af26b3df651825f22baa3acf4ac7d9c49426ce" Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.365301 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.455779 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-utilities\") pod \"ff388947-55a1-4be6-99ba-11955c5cf2b2\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.455895 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j72pq\" (UniqueName: \"kubernetes.io/projected/ff388947-55a1-4be6-99ba-11955c5cf2b2-kube-api-access-j72pq\") pod \"ff388947-55a1-4be6-99ba-11955c5cf2b2\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.456093 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-catalog-content\") pod \"ff388947-55a1-4be6-99ba-11955c5cf2b2\" (UID: \"ff388947-55a1-4be6-99ba-11955c5cf2b2\") " Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.456644 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-utilities" (OuterVolumeSpecName: "utilities") pod "ff388947-55a1-4be6-99ba-11955c5cf2b2" (UID: "ff388947-55a1-4be6-99ba-11955c5cf2b2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.461179 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff388947-55a1-4be6-99ba-11955c5cf2b2-kube-api-access-j72pq" (OuterVolumeSpecName: "kube-api-access-j72pq") pod "ff388947-55a1-4be6-99ba-11955c5cf2b2" (UID: "ff388947-55a1-4be6-99ba-11955c5cf2b2"). InnerVolumeSpecName "kube-api-access-j72pq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.516294 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff388947-55a1-4be6-99ba-11955c5cf2b2" (UID: "ff388947-55a1-4be6-99ba-11955c5cf2b2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.557924 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.558224 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j72pq\" (UniqueName: \"kubernetes.io/projected/ff388947-55a1-4be6-99ba-11955c5cf2b2-kube-api-access-j72pq\") on node \"crc\" DevicePath \"\"" Nov 21 15:08:09 crc kubenswrapper[5133]: I1121 15:08:09.558284 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff388947-55a1-4be6-99ba-11955c5cf2b2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:08:10 crc kubenswrapper[5133]: I1121 15:08:10.334943 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7gzzb" Nov 21 15:08:10 crc kubenswrapper[5133]: I1121 15:08:10.401653 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7gzzb"] Nov 21 15:08:10 crc kubenswrapper[5133]: I1121 15:08:10.421486 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7gzzb"] Nov 21 15:08:10 crc kubenswrapper[5133]: I1121 15:08:10.468869 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff388947-55a1-4be6-99ba-11955c5cf2b2" path="/var/lib/kubelet/pods/ff388947-55a1-4be6-99ba-11955c5cf2b2/volumes" Nov 21 15:08:19 crc kubenswrapper[5133]: I1121 15:08:19.458455 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:08:19 crc kubenswrapper[5133]: E1121 15:08:19.460903 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:08:31 crc kubenswrapper[5133]: I1121 15:08:31.457807 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:08:31 crc kubenswrapper[5133]: E1121 15:08:31.458565 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:08:44 crc kubenswrapper[5133]: I1121 15:08:44.458273 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:08:44 crc kubenswrapper[5133]: E1121 15:08:44.459173 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:08:58 crc kubenswrapper[5133]: I1121 15:08:58.458052 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:08:58 crc kubenswrapper[5133]: E1121 15:08:58.459255 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:09:11 crc kubenswrapper[5133]: I1121 15:09:11.457856 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 
15:09:11 crc kubenswrapper[5133]: E1121 15:09:11.458687 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:09:25 crc kubenswrapper[5133]: I1121 15:09:25.457961 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:09:25 crc kubenswrapper[5133]: E1121 15:09:25.458764 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:09:40 crc kubenswrapper[5133]: I1121 15:09:40.458445 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:09:40 crc kubenswrapper[5133]: E1121 15:09:40.459136 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:09:53 crc kubenswrapper[5133]: I1121 15:09:53.457535 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:09:53 crc kubenswrapper[5133]: E1121 15:09:53.459343 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:10:06 crc kubenswrapper[5133]: I1121 15:10:06.457902 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:10:06 crc kubenswrapper[5133]: E1121 15:10:06.458719 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:10:20 crc kubenswrapper[5133]: I1121 15:10:20.458090 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:10:20 crc kubenswrapper[5133]: E1121 15:10:20.458878 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:10:35 crc kubenswrapper[5133]: I1121 15:10:35.457728 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:10:35 crc kubenswrapper[5133]: E1121 15:10:35.458474 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:10:46 crc kubenswrapper[5133]: I1121 15:10:46.458111 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:10:46 crc kubenswrapper[5133]: E1121 15:10:46.459274 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:11:00 crc kubenswrapper[5133]: I1121 15:11:00.457921 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:11:00 crc kubenswrapper[5133]: E1121 15:11:00.459043 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:11:13 crc kubenswrapper[5133]: I1121 15:11:13.458112 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:11:13 crc kubenswrapper[5133]: E1121 15:11:13.459138 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:11:25 crc kubenswrapper[5133]: I1121 15:11:25.457382 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:11:25 crc kubenswrapper[5133]: E1121 15:11:25.458294 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.502922 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-687zf"] Nov 21 15:11:36 crc kubenswrapper[5133]: E1121 15:11:36.504101 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerName="extract-utilities" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.504120 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerName="extract-utilities" Nov 21 15:11:36 crc kubenswrapper[5133]: E1121 15:11:36.504148 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerName="extract-content" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.504154 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerName="extract-content" Nov 21 15:11:36 crc kubenswrapper[5133]: E1121 15:11:36.504167 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerName="registry-server" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.504173 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerName="registry-server" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.504363 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff388947-55a1-4be6-99ba-11955c5cf2b2" containerName="registry-server" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.505736 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.528350 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-687zf"] Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.648644 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-catalog-content\") pod \"certified-operators-687zf\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.648746 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-599gr\" (UniqueName: \"kubernetes.io/projected/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-kube-api-access-599gr\") pod \"certified-operators-687zf\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.649144 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-utilities\") pod \"certified-operators-687zf\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.751153 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-utilities\") pod \"certified-operators-687zf\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.751297 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-catalog-content\") pod \"certified-operators-687zf\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.751367 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-599gr\" (UniqueName: \"kubernetes.io/projected/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-kube-api-access-599gr\") pod \"certified-operators-687zf\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.751613 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-utilities\") pod \"certified-operators-687zf\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.751729 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-catalog-content\") pod \"certified-operators-687zf\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.773978 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-599gr\" (UniqueName: \"kubernetes.io/projected/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-kube-api-access-599gr\") pod \"certified-operators-687zf\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:36 crc kubenswrapper[5133]: I1121 15:11:36.824407 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:37 crc kubenswrapper[5133]: I1121 15:11:37.333250 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-687zf"] Nov 21 15:11:37 crc kubenswrapper[5133]: I1121 15:11:37.412890 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-687zf" event={"ID":"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186","Type":"ContainerStarted","Data":"286af44a6a8979432e028585c2480384faa7ddd1a2d24d010e847fcd36d22fba"} Nov 21 15:11:37 crc kubenswrapper[5133]: I1121 15:11:37.458554 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:11:37 crc kubenswrapper[5133]: E1121 15:11:37.458828 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:11:38 crc kubenswrapper[5133]: I1121 15:11:38.432147 5133 generic.go:334] "Generic (PLEG): container finished" podID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerID="a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf" exitCode=0 Nov 21 15:11:38 crc kubenswrapper[5133]: I1121 15:11:38.432215 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-687zf" event={"ID":"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186","Type":"ContainerDied","Data":"a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf"} Nov 21 15:11:41 crc kubenswrapper[5133]: I1121 15:11:41.462228 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-687zf" event={"ID":"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186","Type":"ContainerStarted","Data":"0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35"} Nov 21 15:11:43 crc kubenswrapper[5133]: I1121 15:11:43.482311 5133 generic.go:334] "Generic (PLEG): container finished" podID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerID="0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35" exitCode=0 Nov 21 15:11:43 crc kubenswrapper[5133]: I1121 15:11:43.482366 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-687zf" event={"ID":"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186","Type":"ContainerDied","Data":"0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35"} Nov 21 15:11:44 crc kubenswrapper[5133]: I1121 15:11:44.492638 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-687zf" event={"ID":"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186","Type":"ContainerStarted","Data":"558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa"} Nov 21 15:11:44 crc kubenswrapper[5133]: I1121 15:11:44.515202 5133 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-687zf" podStartSLOduration=3.035787865 podStartE2EDuration="8.515183627s" podCreationTimestamp="2025-11-21 15:11:36 +0000 UTC" firstStartedPulling="2025-11-21 15:11:38.434454715 +0000 UTC m=+5358.232286973" lastFinishedPulling="2025-11-21 15:11:43.913850477 +0000 UTC m=+5363.711682735" observedRunningTime="2025-11-21 15:11:44.511730295 +0000 UTC m=+5364.309562553" watchObservedRunningTime="2025-11-21 15:11:44.515183627 +0000 UTC m=+5364.313015875" Nov 21 15:11:46 crc kubenswrapper[5133]: I1121 15:11:46.825473 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:46 crc kubenswrapper[5133]: I1121 15:11:46.826386 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:47 crc kubenswrapper[5133]: I1121 15:11:47.350368 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:52 crc kubenswrapper[5133]: I1121 15:11:52.469967 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:11:52 crc kubenswrapper[5133]: E1121 15:11:52.471049 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:11:56 crc kubenswrapper[5133]: I1121 15:11:56.872099 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:56 crc kubenswrapper[5133]: I1121 15:11:56.932820 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-687zf"] Nov 21 15:11:57 crc kubenswrapper[5133]: I1121 15:11:57.623397 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-687zf" podUID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerName="registry-server" containerID="cri-o://558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa" gracePeriod=2 Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.197432 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.299258 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-catalog-content\") pod \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.299459 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-599gr\" (UniqueName: \"kubernetes.io/projected/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-kube-api-access-599gr\") pod \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.300264 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-utilities\") pod \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\" (UID: \"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186\") " Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.301428 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-utilities" (OuterVolumeSpecName: "utilities") pod "7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" (UID: "7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.309526 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-kube-api-access-599gr" (OuterVolumeSpecName: "kube-api-access-599gr") pod "7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" (UID: "7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186"). InnerVolumeSpecName "kube-api-access-599gr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.375790 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" (UID: "7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.402516 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-599gr\" (UniqueName: \"kubernetes.io/projected/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-kube-api-access-599gr\") on node \"crc\" DevicePath \"\"" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.402555 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.402569 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.634455 5133 generic.go:334] "Generic (PLEG): container finished" podID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerID="558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa" exitCode=0 Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.634508 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-687zf" event={"ID":"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186","Type":"ContainerDied","Data":"558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa"} Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.634544 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-687zf" event={"ID":"7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186","Type":"ContainerDied","Data":"286af44a6a8979432e028585c2480384faa7ddd1a2d24d010e847fcd36d22fba"} Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.634566 5133 scope.go:117] "RemoveContainer" containerID="558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.634736 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-687zf" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.665598 5133 scope.go:117] "RemoveContainer" containerID="0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.666880 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-687zf"] Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.676639 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-687zf"] Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.704733 5133 scope.go:117] "RemoveContainer" containerID="a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.782849 5133 scope.go:117] "RemoveContainer" containerID="558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa" Nov 21 15:11:58 crc kubenswrapper[5133]: E1121 15:11:58.783609 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa\": container with ID starting with 558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa not found: ID does not exist" containerID="558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.783768 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa"} err="failed to get container status \"558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa\": rpc error: code = NotFound desc = could not find container \"558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa\": container with ID starting with 558232e4b28a45b62afe265078196117e2e7e82fd4cf4736a592a8a66108a7aa not found: ID does not exist" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.783811 5133 scope.go:117] "RemoveContainer" containerID="0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35" Nov 21 15:11:58 crc kubenswrapper[5133]: E1121 15:11:58.784198 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35\": container with ID starting with 0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35 not found: ID does not exist" containerID="0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.784230 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35"} err="failed to get container status \"0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35\": rpc error: code = NotFound desc = could not find container \"0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35\": container with ID starting with 0ce92a98d15ff469009cee973d70a50dfde9c0f6eecc2f51523d485f95baff35 not found: ID does not exist" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.784253 5133 scope.go:117] "RemoveContainer" containerID="a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf" Nov 21 15:11:58 crc kubenswrapper[5133]: E1121 15:11:58.784519 5133 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf\": container with ID starting with a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf not found: ID does not exist" containerID="a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf" Nov 21 15:11:58 crc kubenswrapper[5133]: I1121 15:11:58.784556 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf"} err="failed to get container status \"a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf\": rpc error: code = NotFound desc = could not find container \"a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf\": container with ID starting with a494b589699ea5b79f81353bf65eb0eb0de36d7eb86cfdd916b40f251fd582bf not found: ID does not exist" Nov 21 15:12:00 crc kubenswrapper[5133]: I1121 15:12:00.472087 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" path="/var/lib/kubelet/pods/7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186/volumes" Nov 21 15:12:05 crc kubenswrapper[5133]: I1121 15:12:05.457776 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:12:05 crc kubenswrapper[5133]: E1121 15:12:05.459051 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:12:19 crc kubenswrapper[5133]: I1121 15:12:19.458817 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:12:19 crc kubenswrapper[5133]: E1121 15:12:19.460448 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:12:34 crc kubenswrapper[5133]: I1121 15:12:34.458561 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:12:34 crc kubenswrapper[5133]: E1121 15:12:34.459753 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:12:45 crc kubenswrapper[5133]: I1121 15:12:45.459318 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:12:45 crc kubenswrapper[5133]: E1121 15:12:45.460399 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:12:59 crc kubenswrapper[5133]: I1121 15:12:59.458698 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:13:00 crc kubenswrapper[5133]: I1121 15:13:00.275468 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"243c350f1024ea95a5f00b2cbddacf63b17dd2a595c5ab223b68dfcddc42652f"} Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.610680 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-77qsb"] Nov 21 15:13:34 crc kubenswrapper[5133]: E1121 15:13:34.614095 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerName="registry-server" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.614131 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerName="registry-server" Nov 21 15:13:34 crc kubenswrapper[5133]: E1121 15:13:34.614158 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerName="extract-content" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.614165 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerName="extract-content" Nov 21 15:13:34 crc kubenswrapper[5133]: E1121 15:13:34.614180 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerName="extract-utilities" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.614187 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerName="extract-utilities" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.614535 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cb5cd09-28b4-41ca-a8ca-21fa6b4e3186" containerName="registry-server" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.616387 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.634607 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-77qsb"] Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.727063 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tr6hb\" (UniqueName: \"kubernetes.io/projected/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-kube-api-access-tr6hb\") pod \"redhat-marketplace-77qsb\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.727155 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-utilities\") pod \"redhat-marketplace-77qsb\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.727225 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-catalog-content\") pod \"redhat-marketplace-77qsb\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.829863 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tr6hb\" (UniqueName: \"kubernetes.io/projected/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-kube-api-access-tr6hb\") pod \"redhat-marketplace-77qsb\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.829940 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-utilities\") pod \"redhat-marketplace-77qsb\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.830020 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-catalog-content\") pod \"redhat-marketplace-77qsb\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.830604 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-catalog-content\") pod \"redhat-marketplace-77qsb\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.830883 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-utilities\") pod \"redhat-marketplace-77qsb\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.861430 5133 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-tr6hb\" (UniqueName: \"kubernetes.io/projected/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-kube-api-access-tr6hb\") pod \"redhat-marketplace-77qsb\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:34 crc kubenswrapper[5133]: I1121 15:13:34.948569 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:35 crc kubenswrapper[5133]: I1121 15:13:35.431500 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-77qsb"] Nov 21 15:13:35 crc kubenswrapper[5133]: I1121 15:13:35.647643 5133 generic.go:334] "Generic (PLEG): container finished" podID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerID="a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b" exitCode=0 Nov 21 15:13:35 crc kubenswrapper[5133]: I1121 15:13:35.647688 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-77qsb" event={"ID":"09ed6647-b85b-4b66-b1d0-aa12ffabbf75","Type":"ContainerDied","Data":"a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b"} Nov 21 15:13:35 crc kubenswrapper[5133]: I1121 15:13:35.647719 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-77qsb" event={"ID":"09ed6647-b85b-4b66-b1d0-aa12ffabbf75","Type":"ContainerStarted","Data":"6ec7170da2a0d71fbf24d607dad17aaa95609198c33f35af3df43c1f447223b0"} Nov 21 15:13:35 crc kubenswrapper[5133]: I1121 15:13:35.649728 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:13:37 crc kubenswrapper[5133]: I1121 15:13:37.688984 5133 generic.go:334] "Generic (PLEG): container finished" podID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerID="7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2" exitCode=0 Nov 21 15:13:37 crc kubenswrapper[5133]: I1121 15:13:37.689529 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-77qsb" event={"ID":"09ed6647-b85b-4b66-b1d0-aa12ffabbf75","Type":"ContainerDied","Data":"7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2"} Nov 21 15:13:38 crc kubenswrapper[5133]: I1121 15:13:38.700248 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-77qsb" event={"ID":"09ed6647-b85b-4b66-b1d0-aa12ffabbf75","Type":"ContainerStarted","Data":"0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78"} Nov 21 15:13:38 crc kubenswrapper[5133]: I1121 15:13:38.719407 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-77qsb" podStartSLOduration=2.296355329 podStartE2EDuration="4.719388561s" podCreationTimestamp="2025-11-21 15:13:34 +0000 UTC" firstStartedPulling="2025-11-21 15:13:35.649458475 +0000 UTC m=+5475.447290723" lastFinishedPulling="2025-11-21 15:13:38.072491707 +0000 UTC m=+5477.870323955" observedRunningTime="2025-11-21 15:13:38.717806089 +0000 UTC m=+5478.515638357" watchObservedRunningTime="2025-11-21 15:13:38.719388561 +0000 UTC m=+5478.517220809" Nov 21 15:13:44 crc kubenswrapper[5133]: I1121 15:13:44.948918 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:44 crc kubenswrapper[5133]: I1121 15:13:44.949886 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:45 crc kubenswrapper[5133]: I1121 15:13:45.022333 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:45 crc kubenswrapper[5133]: I1121 15:13:45.828142 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:45 crc kubenswrapper[5133]: I1121 15:13:45.878555 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-77qsb"] Nov 21 15:13:47 crc kubenswrapper[5133]: I1121 15:13:47.798388 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-77qsb" podUID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerName="registry-server" containerID="cri-o://0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78" gracePeriod=2 Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.391061 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.536010 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-catalog-content\") pod \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.536247 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tr6hb\" (UniqueName: \"kubernetes.io/projected/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-kube-api-access-tr6hb\") pod \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.536304 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-utilities\") pod \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\" (UID: \"09ed6647-b85b-4b66-b1d0-aa12ffabbf75\") " Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.537537 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-utilities" (OuterVolumeSpecName: "utilities") pod "09ed6647-b85b-4b66-b1d0-aa12ffabbf75" (UID: "09ed6647-b85b-4b66-b1d0-aa12ffabbf75"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.543358 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-kube-api-access-tr6hb" (OuterVolumeSpecName: "kube-api-access-tr6hb") pod "09ed6647-b85b-4b66-b1d0-aa12ffabbf75" (UID: "09ed6647-b85b-4b66-b1d0-aa12ffabbf75"). InnerVolumeSpecName "kube-api-access-tr6hb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.556922 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "09ed6647-b85b-4b66-b1d0-aa12ffabbf75" (UID: "09ed6647-b85b-4b66-b1d0-aa12ffabbf75"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.639297 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tr6hb\" (UniqueName: \"kubernetes.io/projected/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-kube-api-access-tr6hb\") on node \"crc\" DevicePath \"\"" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.639346 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.639364 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09ed6647-b85b-4b66-b1d0-aa12ffabbf75-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.814138 5133 generic.go:334] "Generic (PLEG): container finished" podID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerID="0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78" exitCode=0 Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.814220 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-77qsb" event={"ID":"09ed6647-b85b-4b66-b1d0-aa12ffabbf75","Type":"ContainerDied","Data":"0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78"} Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.814285 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-77qsb" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.814316 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-77qsb" event={"ID":"09ed6647-b85b-4b66-b1d0-aa12ffabbf75","Type":"ContainerDied","Data":"6ec7170da2a0d71fbf24d607dad17aaa95609198c33f35af3df43c1f447223b0"} Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.814357 5133 scope.go:117] "RemoveContainer" containerID="0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.842523 5133 scope.go:117] "RemoveContainer" containerID="7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.862795 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-77qsb"] Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.878127 5133 scope.go:117] "RemoveContainer" containerID="a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.878758 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-77qsb"] Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.949780 5133 scope.go:117] "RemoveContainer" containerID="0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78" Nov 21 15:13:48 crc kubenswrapper[5133]: E1121 15:13:48.950338 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78\": container with ID starting with 0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78 not found: ID does not exist" containerID="0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.950387 5133 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78"} err="failed to get container status \"0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78\": rpc error: code = NotFound desc = could not find container \"0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78\": container with ID starting with 0f452dc0c729cb1ce32bc9ee3805b5c7a645a063030d9d08b7002db936399b78 not found: ID does not exist" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.950422 5133 scope.go:117] "RemoveContainer" containerID="7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2" Nov 21 15:13:48 crc kubenswrapper[5133]: E1121 15:13:48.951727 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2\": container with ID starting with 7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2 not found: ID does not exist" containerID="7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.951760 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2"} err="failed to get container status \"7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2\": rpc error: code = NotFound desc = could not find container \"7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2\": container with ID starting with 7d331fcade4bd20b656ff640f404cb80ea319517f6701e82b69d7c6b215d69b2 not found: ID does not exist" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.951779 5133 scope.go:117] "RemoveContainer" containerID="a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b" Nov 21 15:13:48 crc kubenswrapper[5133]: E1121 15:13:48.952204 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b\": container with ID starting with a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b not found: ID does not exist" containerID="a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b" Nov 21 15:13:48 crc kubenswrapper[5133]: I1121 15:13:48.952257 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b"} err="failed to get container status \"a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b\": rpc error: code = NotFound desc = could not find container \"a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b\": container with ID starting with a118ac14f950026856595d88fa5cb561f70d8224eb62602ba8eff1d6de684b6b not found: ID does not exist" Nov 21 15:13:50 crc kubenswrapper[5133]: I1121 15:13:50.470509 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" path="/var/lib/kubelet/pods/09ed6647-b85b-4b66-b1d0-aa12ffabbf75/volumes" Nov 21 15:14:43 crc kubenswrapper[5133]: I1121 15:14:43.052938 5133 scope.go:117] "RemoveContainer" containerID="207e11f4528c71406011e70e6c0f52772edb9f3cc13d80f9852bed78c5010474" Nov 21 15:14:43 crc kubenswrapper[5133]: I1121 15:14:43.093071 5133 scope.go:117] "RemoveContainer" 
containerID="88e00bbce34de6a9258865b2eccf51feb84ace28f572ef066c0eded608613253" Nov 21 15:14:43 crc kubenswrapper[5133]: I1121 15:14:43.155821 5133 scope.go:117] "RemoveContainer" containerID="d208458e90187dc37e6de3e6c2c6509a5d188287ca186f16703824ec081d2080" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.163095 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd"] Nov 21 15:15:00 crc kubenswrapper[5133]: E1121 15:15:00.164837 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerName="extract-utilities" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.164878 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerName="extract-utilities" Nov 21 15:15:00 crc kubenswrapper[5133]: E1121 15:15:00.164924 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerName="registry-server" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.164947 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerName="registry-server" Nov 21 15:15:00 crc kubenswrapper[5133]: E1121 15:15:00.165062 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerName="extract-content" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.165088 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerName="extract-content" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.165580 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="09ed6647-b85b-4b66-b1d0-aa12ffabbf75" containerName="registry-server" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.167182 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.170127 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.171746 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.187317 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd"] Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.305026 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-config-volume\") pod \"collect-profiles-29395635-m9nmd\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.305165 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vpq5\" (UniqueName: \"kubernetes.io/projected/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-kube-api-access-9vpq5\") pod \"collect-profiles-29395635-m9nmd\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.305230 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-secret-volume\") pod \"collect-profiles-29395635-m9nmd\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.407742 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-config-volume\") pod \"collect-profiles-29395635-m9nmd\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.408271 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vpq5\" (UniqueName: \"kubernetes.io/projected/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-kube-api-access-9vpq5\") pod \"collect-profiles-29395635-m9nmd\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.408336 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-secret-volume\") pod \"collect-profiles-29395635-m9nmd\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.408745 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-config-volume\") pod 
\"collect-profiles-29395635-m9nmd\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.415793 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-secret-volume\") pod \"collect-profiles-29395635-m9nmd\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.430316 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vpq5\" (UniqueName: \"kubernetes.io/projected/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-kube-api-access-9vpq5\") pod \"collect-profiles-29395635-m9nmd\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.497176 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:00 crc kubenswrapper[5133]: I1121 15:15:00.975838 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd"] Nov 21 15:15:00 crc kubenswrapper[5133]: W1121 15:15:00.988906 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod332ddfd4_1fb3_43de_ad7c_eb9f9776324a.slice/crio-d665b531dc01a7bd88f1302674a1e95c7aba0cfd1079110c278bcfdca6232d7f WatchSource:0}: Error finding container d665b531dc01a7bd88f1302674a1e95c7aba0cfd1079110c278bcfdca6232d7f: Status 404 returned error can't find the container with id d665b531dc01a7bd88f1302674a1e95c7aba0cfd1079110c278bcfdca6232d7f Nov 21 15:15:01 crc kubenswrapper[5133]: I1121 15:15:01.556319 5133 generic.go:334] "Generic (PLEG): container finished" podID="332ddfd4-1fb3-43de-ad7c-eb9f9776324a" containerID="9e0c4c7e9cb6691640fa1721d5a4198d0b6640ea572421513998e42451e7d262" exitCode=0 Nov 21 15:15:01 crc kubenswrapper[5133]: I1121 15:15:01.556378 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" event={"ID":"332ddfd4-1fb3-43de-ad7c-eb9f9776324a","Type":"ContainerDied","Data":"9e0c4c7e9cb6691640fa1721d5a4198d0b6640ea572421513998e42451e7d262"} Nov 21 15:15:01 crc kubenswrapper[5133]: I1121 15:15:01.557796 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" event={"ID":"332ddfd4-1fb3-43de-ad7c-eb9f9776324a","Type":"ContainerStarted","Data":"d665b531dc01a7bd88f1302674a1e95c7aba0cfd1079110c278bcfdca6232d7f"} Nov 21 15:15:02 crc kubenswrapper[5133]: I1121 15:15:02.983133 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.060281 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vpq5\" (UniqueName: \"kubernetes.io/projected/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-kube-api-access-9vpq5\") pod \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.060439 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-config-volume\") pod \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.060463 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-secret-volume\") pod \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\" (UID: \"332ddfd4-1fb3-43de-ad7c-eb9f9776324a\") " Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.061229 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-config-volume" (OuterVolumeSpecName: "config-volume") pod "332ddfd4-1fb3-43de-ad7c-eb9f9776324a" (UID: "332ddfd4-1fb3-43de-ad7c-eb9f9776324a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.066705 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-kube-api-access-9vpq5" (OuterVolumeSpecName: "kube-api-access-9vpq5") pod "332ddfd4-1fb3-43de-ad7c-eb9f9776324a" (UID: "332ddfd4-1fb3-43de-ad7c-eb9f9776324a"). InnerVolumeSpecName "kube-api-access-9vpq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.087162 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "332ddfd4-1fb3-43de-ad7c-eb9f9776324a" (UID: "332ddfd4-1fb3-43de-ad7c-eb9f9776324a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.162908 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vpq5\" (UniqueName: \"kubernetes.io/projected/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-kube-api-access-9vpq5\") on node \"crc\" DevicePath \"\"" Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.162951 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.162963 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/332ddfd4-1fb3-43de-ad7c-eb9f9776324a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.578034 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" event={"ID":"332ddfd4-1fb3-43de-ad7c-eb9f9776324a","Type":"ContainerDied","Data":"d665b531dc01a7bd88f1302674a1e95c7aba0cfd1079110c278bcfdca6232d7f"} Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.578314 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d665b531dc01a7bd88f1302674a1e95c7aba0cfd1079110c278bcfdca6232d7f" Nov 21 15:15:03 crc kubenswrapper[5133]: I1121 15:15:03.578371 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395635-m9nmd" Nov 21 15:15:04 crc kubenswrapper[5133]: I1121 15:15:04.073946 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq"] Nov 21 15:15:04 crc kubenswrapper[5133]: I1121 15:15:04.082562 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395590-w6qvq"] Nov 21 15:15:04 crc kubenswrapper[5133]: I1121 15:15:04.471158 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88666366-11ef-4546-b82f-6a71b10bb115" path="/var/lib/kubelet/pods/88666366-11ef-4546-b82f-6a71b10bb115/volumes" Nov 21 15:15:23 crc kubenswrapper[5133]: I1121 15:15:23.311489 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:15:23 crc kubenswrapper[5133]: I1121 15:15:23.312214 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:15:43 crc kubenswrapper[5133]: I1121 15:15:43.232301 5133 scope.go:117] "RemoveContainer" containerID="1a88102269bf6fb70be502741ae33e2e53e8686c4264576273ff0f276370698c" Nov 21 15:15:53 crc kubenswrapper[5133]: I1121 15:15:53.311188 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 21 15:15:53 crc kubenswrapper[5133]: I1121 15:15:53.312320 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:16:23 crc kubenswrapper[5133]: I1121 15:16:23.310183 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:16:23 crc kubenswrapper[5133]: I1121 15:16:23.310666 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:16:23 crc kubenswrapper[5133]: I1121 15:16:23.310707 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 15:16:23 crc kubenswrapper[5133]: I1121 15:16:23.311415 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"243c350f1024ea95a5f00b2cbddacf63b17dd2a595c5ab223b68dfcddc42652f"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:16:23 crc kubenswrapper[5133]: I1121 15:16:23.311470 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://243c350f1024ea95a5f00b2cbddacf63b17dd2a595c5ab223b68dfcddc42652f" gracePeriod=600 Nov 21 15:16:23 crc kubenswrapper[5133]: E1121 15:16:23.478821 5133 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52f5a729_05d1_4f84_a216_1df3233af57d.slice/crio-conmon-243c350f1024ea95a5f00b2cbddacf63b17dd2a595c5ab223b68dfcddc42652f.scope\": RecentStats: unable to find data in memory cache]" Nov 21 15:16:24 crc kubenswrapper[5133]: I1121 15:16:24.375137 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="243c350f1024ea95a5f00b2cbddacf63b17dd2a595c5ab223b68dfcddc42652f" exitCode=0 Nov 21 15:16:24 crc kubenswrapper[5133]: I1121 15:16:24.375368 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"243c350f1024ea95a5f00b2cbddacf63b17dd2a595c5ab223b68dfcddc42652f"} Nov 21 15:16:24 crc kubenswrapper[5133]: I1121 15:16:24.375876 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c"} Nov 21 15:16:24 
crc kubenswrapper[5133]: I1121 15:16:24.375903 5133 scope.go:117] "RemoveContainer" containerID="7a5c0c9b2a3bd64824e8d11ab9ec3268d4ce1e7c682c769e61779d3ac0b7de78" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.592225 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-k5vcf"] Nov 21 15:18:08 crc kubenswrapper[5133]: E1121 15:18:08.593463 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="332ddfd4-1fb3-43de-ad7c-eb9f9776324a" containerName="collect-profiles" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.593485 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="332ddfd4-1fb3-43de-ad7c-eb9f9776324a" containerName="collect-profiles" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.593789 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="332ddfd4-1fb3-43de-ad7c-eb9f9776324a" containerName="collect-profiles" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.596368 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.622726 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k5vcf"] Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.660165 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-utilities\") pod \"community-operators-k5vcf\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.660291 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-catalog-content\") pod \"community-operators-k5vcf\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.660377 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2z2j\" (UniqueName: \"kubernetes.io/projected/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-kube-api-access-v2z2j\") pod \"community-operators-k5vcf\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.761848 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2z2j\" (UniqueName: \"kubernetes.io/projected/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-kube-api-access-v2z2j\") pod \"community-operators-k5vcf\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.762476 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-utilities\") pod \"community-operators-k5vcf\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.762629 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-catalog-content\") pod \"community-operators-k5vcf\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.763034 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-utilities\") pod \"community-operators-k5vcf\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.763208 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-catalog-content\") pod \"community-operators-k5vcf\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.782377 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2z2j\" (UniqueName: \"kubernetes.io/projected/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-kube-api-access-v2z2j\") pod \"community-operators-k5vcf\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:08 crc kubenswrapper[5133]: I1121 15:18:08.933365 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:09 crc kubenswrapper[5133]: I1121 15:18:09.455823 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k5vcf"] Nov 21 15:18:10 crc kubenswrapper[5133]: I1121 15:18:10.346739 5133 generic.go:334] "Generic (PLEG): container finished" podID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerID="0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57" exitCode=0 Nov 21 15:18:10 crc kubenswrapper[5133]: I1121 15:18:10.346835 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k5vcf" event={"ID":"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f","Type":"ContainerDied","Data":"0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57"} Nov 21 15:18:10 crc kubenswrapper[5133]: I1121 15:18:10.347290 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k5vcf" event={"ID":"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f","Type":"ContainerStarted","Data":"087db5a4311f6ef212d1eec2589d77028e60a76a435270fb593bb5896b24e02e"} Nov 21 15:18:11 crc kubenswrapper[5133]: I1121 15:18:11.380760 5133 generic.go:334] "Generic (PLEG): container finished" podID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerID="44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8" exitCode=0 Nov 21 15:18:11 crc kubenswrapper[5133]: I1121 15:18:11.380806 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k5vcf" event={"ID":"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f","Type":"ContainerDied","Data":"44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8"} Nov 21 15:18:12 crc kubenswrapper[5133]: I1121 15:18:12.392974 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k5vcf" event={"ID":"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f","Type":"ContainerStarted","Data":"9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32"} 
Nov 21 15:18:12 crc kubenswrapper[5133]: I1121 15:18:12.419579 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-k5vcf" podStartSLOduration=2.973960398 podStartE2EDuration="4.419556181s" podCreationTimestamp="2025-11-21 15:18:08 +0000 UTC" firstStartedPulling="2025-11-21 15:18:10.354611915 +0000 UTC m=+5750.152444163" lastFinishedPulling="2025-11-21 15:18:11.800207698 +0000 UTC m=+5751.598039946" observedRunningTime="2025-11-21 15:18:12.411331162 +0000 UTC m=+5752.209163410" watchObservedRunningTime="2025-11-21 15:18:12.419556181 +0000 UTC m=+5752.217388429" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.383500 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-x5tzm"] Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.385579 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.401190 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x5tzm"] Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.459250 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-utilities\") pod \"redhat-operators-x5tzm\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.459653 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5p9w\" (UniqueName: \"kubernetes.io/projected/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-kube-api-access-m5p9w\") pod \"redhat-operators-x5tzm\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.459703 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-catalog-content\") pod \"redhat-operators-x5tzm\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.561837 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-utilities\") pod \"redhat-operators-x5tzm\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.561932 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5p9w\" (UniqueName: \"kubernetes.io/projected/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-kube-api-access-m5p9w\") pod \"redhat-operators-x5tzm\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.561974 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-catalog-content\") pod \"redhat-operators-x5tzm\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " 
pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.562521 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-utilities\") pod \"redhat-operators-x5tzm\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.562598 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-catalog-content\") pod \"redhat-operators-x5tzm\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.585770 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5p9w\" (UniqueName: \"kubernetes.io/projected/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-kube-api-access-m5p9w\") pod \"redhat-operators-x5tzm\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:13 crc kubenswrapper[5133]: I1121 15:18:13.705908 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:14 crc kubenswrapper[5133]: I1121 15:18:14.135748 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x5tzm"] Nov 21 15:18:14 crc kubenswrapper[5133]: I1121 15:18:14.415812 5133 generic.go:334] "Generic (PLEG): container finished" podID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerID="a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113" exitCode=0 Nov 21 15:18:14 crc kubenswrapper[5133]: I1121 15:18:14.415875 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5tzm" event={"ID":"60d394d2-ef23-4cd8-bc91-f26ec69c77e0","Type":"ContainerDied","Data":"a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113"} Nov 21 15:18:14 crc kubenswrapper[5133]: I1121 15:18:14.416328 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5tzm" event={"ID":"60d394d2-ef23-4cd8-bc91-f26ec69c77e0","Type":"ContainerStarted","Data":"b0568785ae97c4da4631304e2130c31ff332d369da7d9341bd581c5098f08551"} Nov 21 15:18:16 crc kubenswrapper[5133]: I1121 15:18:16.439812 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5tzm" event={"ID":"60d394d2-ef23-4cd8-bc91-f26ec69c77e0","Type":"ContainerStarted","Data":"946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab"} Nov 21 15:18:18 crc kubenswrapper[5133]: I1121 15:18:18.934072 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:18 crc kubenswrapper[5133]: I1121 15:18:18.935021 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:19 crc kubenswrapper[5133]: I1121 15:18:19.004123 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:19 crc kubenswrapper[5133]: I1121 15:18:19.473323 5133 generic.go:334] "Generic (PLEG): container finished" podID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" 
containerID="946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab" exitCode=0 Nov 21 15:18:19 crc kubenswrapper[5133]: I1121 15:18:19.473418 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5tzm" event={"ID":"60d394d2-ef23-4cd8-bc91-f26ec69c77e0","Type":"ContainerDied","Data":"946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab"} Nov 21 15:18:19 crc kubenswrapper[5133]: I1121 15:18:19.546651 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:20 crc kubenswrapper[5133]: I1121 15:18:20.483465 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5tzm" event={"ID":"60d394d2-ef23-4cd8-bc91-f26ec69c77e0","Type":"ContainerStarted","Data":"16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6"} Nov 21 15:18:20 crc kubenswrapper[5133]: I1121 15:18:20.503899 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-x5tzm" podStartSLOduration=1.672983103 podStartE2EDuration="7.503880645s" podCreationTimestamp="2025-11-21 15:18:13 +0000 UTC" firstStartedPulling="2025-11-21 15:18:14.417400331 +0000 UTC m=+5754.215232579" lastFinishedPulling="2025-11-21 15:18:20.248297833 +0000 UTC m=+5760.046130121" observedRunningTime="2025-11-21 15:18:20.502646092 +0000 UTC m=+5760.300478350" watchObservedRunningTime="2025-11-21 15:18:20.503880645 +0000 UTC m=+5760.301712913" Nov 21 15:18:21 crc kubenswrapper[5133]: I1121 15:18:21.787823 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k5vcf"] Nov 21 15:18:21 crc kubenswrapper[5133]: I1121 15:18:21.788581 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-k5vcf" podUID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerName="registry-server" containerID="cri-o://9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32" gracePeriod=2 Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.162122 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.244932 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2z2j\" (UniqueName: \"kubernetes.io/projected/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-kube-api-access-v2z2j\") pod \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.245399 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-utilities\") pod \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.245576 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-catalog-content\") pod \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\" (UID: \"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f\") " Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.245881 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-utilities" (OuterVolumeSpecName: "utilities") pod "27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" (UID: "27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.246387 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.253920 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-kube-api-access-v2z2j" (OuterVolumeSpecName: "kube-api-access-v2z2j") pod "27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" (UID: "27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f"). InnerVolumeSpecName "kube-api-access-v2z2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.296313 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" (UID: "27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.310971 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.311432 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.348230 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2z2j\" (UniqueName: \"kubernetes.io/projected/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-kube-api-access-v2z2j\") on node \"crc\" DevicePath \"\"" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.348268 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.512904 5133 generic.go:334] "Generic (PLEG): container finished" podID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerID="9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32" exitCode=0 Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.512952 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k5vcf" event={"ID":"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f","Type":"ContainerDied","Data":"9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32"} Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.512992 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k5vcf" event={"ID":"27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f","Type":"ContainerDied","Data":"087db5a4311f6ef212d1eec2589d77028e60a76a435270fb593bb5896b24e02e"} Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.513044 5133 scope.go:117] "RemoveContainer" containerID="9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.513083 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k5vcf" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.541607 5133 scope.go:117] "RemoveContainer" containerID="44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.576260 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k5vcf"] Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.595055 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-k5vcf"] Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.596945 5133 scope.go:117] "RemoveContainer" containerID="0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.641238 5133 scope.go:117] "RemoveContainer" containerID="9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32" Nov 21 15:18:23 crc kubenswrapper[5133]: E1121 15:18:23.641961 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32\": container with ID starting with 9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32 not found: ID does not exist" containerID="9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.642031 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32"} err="failed to get container status \"9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32\": rpc error: code = NotFound desc = could not find container \"9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32\": container with ID starting with 9e198ee05b1d64e2bed7a558de880d4e922a646274347407c7c7234ea3a41c32 not found: ID does not exist" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.642063 5133 scope.go:117] "RemoveContainer" containerID="44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8" Nov 21 15:18:23 crc kubenswrapper[5133]: E1121 15:18:23.642488 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8\": container with ID starting with 44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8 not found: ID does not exist" containerID="44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.642513 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8"} err="failed to get container status \"44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8\": rpc error: code = NotFound desc = could not find container \"44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8\": container with ID starting with 44d6d81a73415f9dbac90bd8934afc46b2f62f6c0fb8a94914125d26e97249c8 not found: ID does not exist" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.642529 5133 scope.go:117] "RemoveContainer" containerID="0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57" Nov 21 15:18:23 crc kubenswrapper[5133]: E1121 15:18:23.642748 5133 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57\": container with ID starting with 0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57 not found: ID does not exist" containerID="0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.642774 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57"} err="failed to get container status \"0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57\": rpc error: code = NotFound desc = could not find container \"0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57\": container with ID starting with 0df2321055c87bd8213b0eab57ec88800fc362c723f3ded7ca8ff56e2a203f57 not found: ID does not exist" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.706600 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:23 crc kubenswrapper[5133]: I1121 15:18:23.706727 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:24 crc kubenswrapper[5133]: I1121 15:18:24.471121 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" path="/var/lib/kubelet/pods/27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f/volumes" Nov 21 15:18:24 crc kubenswrapper[5133]: I1121 15:18:24.758959 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-x5tzm" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerName="registry-server" probeResult="failure" output=< Nov 21 15:18:24 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 15:18:24 crc kubenswrapper[5133]: > Nov 21 15:18:33 crc kubenswrapper[5133]: I1121 15:18:33.758064 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:33 crc kubenswrapper[5133]: I1121 15:18:33.820502 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:33 crc kubenswrapper[5133]: I1121 15:18:33.992254 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x5tzm"] Nov 21 15:18:35 crc kubenswrapper[5133]: I1121 15:18:35.637924 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-x5tzm" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerName="registry-server" containerID="cri-o://16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6" gracePeriod=2 Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.201363 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.221373 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-catalog-content\") pod \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.221468 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-utilities\") pod \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.221549 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5p9w\" (UniqueName: \"kubernetes.io/projected/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-kube-api-access-m5p9w\") pod \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\" (UID: \"60d394d2-ef23-4cd8-bc91-f26ec69c77e0\") " Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.222742 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-utilities" (OuterVolumeSpecName: "utilities") pod "60d394d2-ef23-4cd8-bc91-f26ec69c77e0" (UID: "60d394d2-ef23-4cd8-bc91-f26ec69c77e0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.230306 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-kube-api-access-m5p9w" (OuterVolumeSpecName: "kube-api-access-m5p9w") pod "60d394d2-ef23-4cd8-bc91-f26ec69c77e0" (UID: "60d394d2-ef23-4cd8-bc91-f26ec69c77e0"). InnerVolumeSpecName "kube-api-access-m5p9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.320307 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "60d394d2-ef23-4cd8-bc91-f26ec69c77e0" (UID: "60d394d2-ef23-4cd8-bc91-f26ec69c77e0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.323502 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.323556 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5p9w\" (UniqueName: \"kubernetes.io/projected/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-kube-api-access-m5p9w\") on node \"crc\" DevicePath \"\"" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.323571 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60d394d2-ef23-4cd8-bc91-f26ec69c77e0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.648316 5133 generic.go:334] "Generic (PLEG): container finished" podID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerID="16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6" exitCode=0 Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.648404 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5tzm" event={"ID":"60d394d2-ef23-4cd8-bc91-f26ec69c77e0","Type":"ContainerDied","Data":"16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6"} Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.648438 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x5tzm" event={"ID":"60d394d2-ef23-4cd8-bc91-f26ec69c77e0","Type":"ContainerDied","Data":"b0568785ae97c4da4631304e2130c31ff332d369da7d9341bd581c5098f08551"} Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.648459 5133 scope.go:117] "RemoveContainer" containerID="16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.648639 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-x5tzm" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.674158 5133 scope.go:117] "RemoveContainer" containerID="946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.681521 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x5tzm"] Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.692876 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-x5tzm"] Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.702331 5133 scope.go:117] "RemoveContainer" containerID="a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.740919 5133 scope.go:117] "RemoveContainer" containerID="16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6" Nov 21 15:18:36 crc kubenswrapper[5133]: E1121 15:18:36.741438 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6\": container with ID starting with 16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6 not found: ID does not exist" containerID="16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.741495 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6"} err="failed to get container status \"16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6\": rpc error: code = NotFound desc = could not find container \"16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6\": container with ID starting with 16054e31ac7a5a67a555e8367dc5f7875498ab8d703f0b1827a4302acfafafd6 not found: ID does not exist" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.741530 5133 scope.go:117] "RemoveContainer" containerID="946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab" Nov 21 15:18:36 crc kubenswrapper[5133]: E1121 15:18:36.741816 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab\": container with ID starting with 946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab not found: ID does not exist" containerID="946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.741850 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab"} err="failed to get container status \"946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab\": rpc error: code = NotFound desc = could not find container \"946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab\": container with ID starting with 946b0e365433954f22702c469a6849f1d1e7f57dba11bbc5ad6220f4e5fdbaab not found: ID does not exist" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.741873 5133 scope.go:117] "RemoveContainer" containerID="a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113" Nov 21 15:18:36 crc kubenswrapper[5133]: E1121 15:18:36.742252 5133 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113\": container with ID starting with a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113 not found: ID does not exist" containerID="a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113" Nov 21 15:18:36 crc kubenswrapper[5133]: I1121 15:18:36.742282 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113"} err="failed to get container status \"a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113\": rpc error: code = NotFound desc = could not find container \"a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113\": container with ID starting with a76ae365e7d2968d4a394dc4e076ad8628a96f112e5926656d91afb8c859f113 not found: ID does not exist" Nov 21 15:18:38 crc kubenswrapper[5133]: I1121 15:18:38.471827 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" path="/var/lib/kubelet/pods/60d394d2-ef23-4cd8-bc91-f26ec69c77e0/volumes" Nov 21 15:18:53 crc kubenswrapper[5133]: I1121 15:18:53.310924 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:18:53 crc kubenswrapper[5133]: I1121 15:18:53.311505 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:19:23 crc kubenswrapper[5133]: I1121 15:19:23.310600 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:19:23 crc kubenswrapper[5133]: I1121 15:19:23.311134 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:19:23 crc kubenswrapper[5133]: I1121 15:19:23.311173 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 15:19:23 crc kubenswrapper[5133]: I1121 15:19:23.311881 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:19:23 crc kubenswrapper[5133]: I1121 15:19:23.311937 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" 
podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" gracePeriod=600 Nov 21 15:19:23 crc kubenswrapper[5133]: E1121 15:19:23.443220 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:19:24 crc kubenswrapper[5133]: I1121 15:19:24.092758 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" exitCode=0 Nov 21 15:19:24 crc kubenswrapper[5133]: I1121 15:19:24.092818 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c"} Nov 21 15:19:24 crc kubenswrapper[5133]: I1121 15:19:24.092868 5133 scope.go:117] "RemoveContainer" containerID="243c350f1024ea95a5f00b2cbddacf63b17dd2a595c5ab223b68dfcddc42652f" Nov 21 15:19:24 crc kubenswrapper[5133]: I1121 15:19:24.093658 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:19:24 crc kubenswrapper[5133]: E1121 15:19:24.094172 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:19:36 crc kubenswrapper[5133]: I1121 15:19:36.458389 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:19:36 crc kubenswrapper[5133]: E1121 15:19:36.459390 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:19:48 crc kubenswrapper[5133]: I1121 15:19:48.457266 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:19:48 crc kubenswrapper[5133]: E1121 15:19:48.458149 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:20:01 crc kubenswrapper[5133]: I1121 15:20:01.457954 5133 scope.go:117] 
"RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:20:01 crc kubenswrapper[5133]: E1121 15:20:01.458918 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:20:14 crc kubenswrapper[5133]: I1121 15:20:14.457458 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:20:14 crc kubenswrapper[5133]: E1121 15:20:14.458407 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:20:28 crc kubenswrapper[5133]: I1121 15:20:28.457908 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:20:28 crc kubenswrapper[5133]: E1121 15:20:28.459711 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:20:42 crc kubenswrapper[5133]: I1121 15:20:42.464292 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:20:42 crc kubenswrapper[5133]: E1121 15:20:42.465041 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:20:57 crc kubenswrapper[5133]: I1121 15:20:57.458120 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:20:57 crc kubenswrapper[5133]: E1121 15:20:57.458800 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:21:12 crc kubenswrapper[5133]: I1121 15:21:12.465930 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:21:12 crc kubenswrapper[5133]: E1121 15:21:12.466932 5133 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:21:23 crc kubenswrapper[5133]: I1121 15:21:23.458696 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:21:23 crc kubenswrapper[5133]: E1121 15:21:23.460134 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:21:34 crc kubenswrapper[5133]: I1121 15:21:34.460987 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:21:34 crc kubenswrapper[5133]: E1121 15:21:34.461799 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.422237 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dmf5x"] Nov 21 15:21:45 crc kubenswrapper[5133]: E1121 15:21:45.423208 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerName="extract-content" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.423287 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerName="extract-content" Nov 21 15:21:45 crc kubenswrapper[5133]: E1121 15:21:45.423304 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerName="extract-content" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.423312 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerName="extract-content" Nov 21 15:21:45 crc kubenswrapper[5133]: E1121 15:21:45.423324 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerName="extract-utilities" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.423330 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerName="extract-utilities" Nov 21 15:21:45 crc kubenswrapper[5133]: E1121 15:21:45.423346 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerName="extract-utilities" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.423352 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerName="extract-utilities" Nov 21 15:21:45 crc kubenswrapper[5133]: E1121 
15:21:45.423376 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerName="registry-server" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.423382 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerName="registry-server" Nov 21 15:21:45 crc kubenswrapper[5133]: E1121 15:21:45.423394 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerName="registry-server" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.423401 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerName="registry-server" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.423606 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="60d394d2-ef23-4cd8-bc91-f26ec69c77e0" containerName="registry-server" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.423622 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="27c7b98c-b7c4-4f9d-bf5e-e0c7b514253f" containerName="registry-server" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.425051 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.442788 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dmf5x"] Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.453082 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-catalog-content\") pod \"certified-operators-dmf5x\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.453128 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-utilities\") pod \"certified-operators-dmf5x\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.453241 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppfd4\" (UniqueName: \"kubernetes.io/projected/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-kube-api-access-ppfd4\") pod \"certified-operators-dmf5x\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.555580 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppfd4\" (UniqueName: \"kubernetes.io/projected/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-kube-api-access-ppfd4\") pod \"certified-operators-dmf5x\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.555822 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-catalog-content\") pod \"certified-operators-dmf5x\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " 
pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.555856 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-utilities\") pod \"certified-operators-dmf5x\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.556362 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-utilities\") pod \"certified-operators-dmf5x\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.556753 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-catalog-content\") pod \"certified-operators-dmf5x\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.587766 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppfd4\" (UniqueName: \"kubernetes.io/projected/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-kube-api-access-ppfd4\") pod \"certified-operators-dmf5x\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:45 crc kubenswrapper[5133]: I1121 15:21:45.748979 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:46 crc kubenswrapper[5133]: I1121 15:21:46.260117 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dmf5x"] Nov 21 15:21:46 crc kubenswrapper[5133]: I1121 15:21:46.414670 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmf5x" event={"ID":"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df","Type":"ContainerStarted","Data":"81b8d53ad5dff4232745455cbad92ebf471dd9a563f89d2f50a0fef37d360c63"} Nov 21 15:21:46 crc kubenswrapper[5133]: I1121 15:21:46.457904 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:21:46 crc kubenswrapper[5133]: E1121 15:21:46.458199 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:21:47 crc kubenswrapper[5133]: I1121 15:21:47.424229 5133 generic.go:334] "Generic (PLEG): container finished" podID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerID="19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52" exitCode=0 Nov 21 15:21:47 crc kubenswrapper[5133]: I1121 15:21:47.424770 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmf5x" event={"ID":"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df","Type":"ContainerDied","Data":"19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52"} Nov 21 
15:21:47 crc kubenswrapper[5133]: I1121 15:21:47.426977 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:21:48 crc kubenswrapper[5133]: I1121 15:21:48.435585 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmf5x" event={"ID":"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df","Type":"ContainerStarted","Data":"4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2"} Nov 21 15:21:49 crc kubenswrapper[5133]: I1121 15:21:49.446671 5133 generic.go:334] "Generic (PLEG): container finished" podID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerID="4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2" exitCode=0 Nov 21 15:21:49 crc kubenswrapper[5133]: I1121 15:21:49.447031 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmf5x" event={"ID":"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df","Type":"ContainerDied","Data":"4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2"} Nov 21 15:21:50 crc kubenswrapper[5133]: I1121 15:21:50.469046 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmf5x" event={"ID":"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df","Type":"ContainerStarted","Data":"9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2"} Nov 21 15:21:50 crc kubenswrapper[5133]: I1121 15:21:50.494747 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dmf5x" podStartSLOduration=3.102268177 podStartE2EDuration="5.494725487s" podCreationTimestamp="2025-11-21 15:21:45 +0000 UTC" firstStartedPulling="2025-11-21 15:21:47.426692001 +0000 UTC m=+5967.224524249" lastFinishedPulling="2025-11-21 15:21:49.819149311 +0000 UTC m=+5969.616981559" observedRunningTime="2025-11-21 15:21:50.486685603 +0000 UTC m=+5970.284517851" watchObservedRunningTime="2025-11-21 15:21:50.494725487 +0000 UTC m=+5970.292557735" Nov 21 15:21:55 crc kubenswrapper[5133]: I1121 15:21:55.749507 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:55 crc kubenswrapper[5133]: I1121 15:21:55.751158 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:55 crc kubenswrapper[5133]: I1121 15:21:55.808460 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:56 crc kubenswrapper[5133]: I1121 15:21:56.597359 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:56 crc kubenswrapper[5133]: I1121 15:21:56.648462 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dmf5x"] Nov 21 15:21:58 crc kubenswrapper[5133]: I1121 15:21:58.548839 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dmf5x" podUID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerName="registry-server" containerID="cri-o://9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2" gracePeriod=2 Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.007664 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.124161 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-utilities\") pod \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.124612 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppfd4\" (UniqueName: \"kubernetes.io/projected/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-kube-api-access-ppfd4\") pod \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.124656 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-catalog-content\") pod \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\" (UID: \"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df\") " Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.125049 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-utilities" (OuterVolumeSpecName: "utilities") pod "fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" (UID: "fdf2fd62-da98-4cb1-9d1b-cb43f44b58df"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.125251 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.130649 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-kube-api-access-ppfd4" (OuterVolumeSpecName: "kube-api-access-ppfd4") pod "fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" (UID: "fdf2fd62-da98-4cb1-9d1b-cb43f44b58df"). InnerVolumeSpecName "kube-api-access-ppfd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.185732 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" (UID: "fdf2fd62-da98-4cb1-9d1b-cb43f44b58df"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.227528 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppfd4\" (UniqueName: \"kubernetes.io/projected/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-kube-api-access-ppfd4\") on node \"crc\" DevicePath \"\"" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.227568 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.458183 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:21:59 crc kubenswrapper[5133]: E1121 15:21:59.458656 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.564804 5133 generic.go:334] "Generic (PLEG): container finished" podID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerID="9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2" exitCode=0 Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.564864 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dmf5x" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.564882 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmf5x" event={"ID":"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df","Type":"ContainerDied","Data":"9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2"} Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.565348 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmf5x" event={"ID":"fdf2fd62-da98-4cb1-9d1b-cb43f44b58df","Type":"ContainerDied","Data":"81b8d53ad5dff4232745455cbad92ebf471dd9a563f89d2f50a0fef37d360c63"} Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.565370 5133 scope.go:117] "RemoveContainer" containerID="9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.597658 5133 scope.go:117] "RemoveContainer" containerID="4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.610535 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dmf5x"] Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.621052 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dmf5x"] Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.630049 5133 scope.go:117] "RemoveContainer" containerID="19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.656942 5133 scope.go:117] "RemoveContainer" containerID="9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2" Nov 21 15:21:59 crc kubenswrapper[5133]: E1121 15:21:59.657475 5133 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2\": container with ID starting with 9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2 not found: ID does not exist" containerID="9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.657548 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2"} err="failed to get container status \"9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2\": rpc error: code = NotFound desc = could not find container \"9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2\": container with ID starting with 9e1cff271510333c80fbdcc7dbbb604e5611cd309f55ae4dac8d12c8c8a70ef2 not found: ID does not exist" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.657596 5133 scope.go:117] "RemoveContainer" containerID="4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2" Nov 21 15:21:59 crc kubenswrapper[5133]: E1121 15:21:59.658044 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2\": container with ID starting with 4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2 not found: ID does not exist" containerID="4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.658071 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2"} err="failed to get container status \"4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2\": rpc error: code = NotFound desc = could not find container \"4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2\": container with ID starting with 4a5b62cb0aa082dff1c0b07d5d7b52f14dc819a284a042aa4f98756be03b63d2 not found: ID does not exist" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.658087 5133 scope.go:117] "RemoveContainer" containerID="19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52" Nov 21 15:21:59 crc kubenswrapper[5133]: E1121 15:21:59.658444 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52\": container with ID starting with 19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52 not found: ID does not exist" containerID="19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52" Nov 21 15:21:59 crc kubenswrapper[5133]: I1121 15:21:59.658493 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52"} err="failed to get container status \"19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52\": rpc error: code = NotFound desc = could not find container \"19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52\": container with ID starting with 19fd2bfe75227609dffe82e978899986a2e6f6f33bfc3e717830ea8f84854a52 not found: ID does not exist" Nov 21 15:22:00 crc kubenswrapper[5133]: I1121 15:22:00.470677 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" path="/var/lib/kubelet/pods/fdf2fd62-da98-4cb1-9d1b-cb43f44b58df/volumes" Nov 21 15:22:10 crc kubenswrapper[5133]: I1121 15:22:10.460532 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:22:10 crc kubenswrapper[5133]: E1121 15:22:10.461328 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:22:21 crc kubenswrapper[5133]: I1121 15:22:21.458975 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:22:21 crc kubenswrapper[5133]: E1121 15:22:21.459903 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:22:35 crc kubenswrapper[5133]: I1121 15:22:35.458371 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:22:35 crc kubenswrapper[5133]: E1121 15:22:35.459787 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:22:50 crc kubenswrapper[5133]: I1121 15:22:50.458435 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:22:50 crc kubenswrapper[5133]: E1121 15:22:50.459296 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:23:01 crc kubenswrapper[5133]: I1121 15:23:01.458574 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:23:01 crc kubenswrapper[5133]: E1121 15:23:01.459404 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:23:13 crc kubenswrapper[5133]: I1121 15:23:13.458615 5133 
scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:23:13 crc kubenswrapper[5133]: E1121 15:23:13.459644 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:23:28 crc kubenswrapper[5133]: I1121 15:23:28.458133 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:23:28 crc kubenswrapper[5133]: E1121 15:23:28.459120 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:23:42 crc kubenswrapper[5133]: I1121 15:23:42.471753 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:23:42 crc kubenswrapper[5133]: E1121 15:23:42.472740 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:23:57 crc kubenswrapper[5133]: I1121 15:23:57.457525 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:23:57 crc kubenswrapper[5133]: E1121 15:23:57.459988 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:24:11 crc kubenswrapper[5133]: I1121 15:24:11.458940 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:24:11 crc kubenswrapper[5133]: E1121 15:24:11.460358 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:24:24 crc kubenswrapper[5133]: I1121 15:24:24.457675 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.377344 5133 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xqxtt"] Nov 21 15:24:29 crc kubenswrapper[5133]: E1121 15:24:29.379877 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerName="extract-utilities" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.380086 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerName="extract-utilities" Nov 21 15:24:29 crc kubenswrapper[5133]: E1121 15:24:29.380252 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerName="extract-content" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.380370 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerName="extract-content" Nov 21 15:24:29 crc kubenswrapper[5133]: E1121 15:24:29.380521 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerName="registry-server" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.380678 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerName="registry-server" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.381191 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdf2fd62-da98-4cb1-9d1b-cb43f44b58df" containerName="registry-server" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.383409 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.391823 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xqxtt"] Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.527589 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knkzx\" (UniqueName: \"kubernetes.io/projected/01034df4-2fc6-4db2-8a45-f944f049f262-kube-api-access-knkzx\") pod \"redhat-marketplace-xqxtt\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.527799 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-catalog-content\") pod \"redhat-marketplace-xqxtt\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.527937 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-utilities\") pod \"redhat-marketplace-xqxtt\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.629242 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-catalog-content\") pod \"redhat-marketplace-xqxtt\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 
15:24:29.629339 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-utilities\") pod \"redhat-marketplace-xqxtt\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.629467 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knkzx\" (UniqueName: \"kubernetes.io/projected/01034df4-2fc6-4db2-8a45-f944f049f262-kube-api-access-knkzx\") pod \"redhat-marketplace-xqxtt\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.629768 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-catalog-content\") pod \"redhat-marketplace-xqxtt\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.630191 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-utilities\") pod \"redhat-marketplace-xqxtt\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.649587 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knkzx\" (UniqueName: \"kubernetes.io/projected/01034df4-2fc6-4db2-8a45-f944f049f262-kube-api-access-knkzx\") pod \"redhat-marketplace-xqxtt\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:29 crc kubenswrapper[5133]: I1121 15:24:29.725864 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:30 crc kubenswrapper[5133]: I1121 15:24:30.166186 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xqxtt"] Nov 21 15:24:31 crc kubenswrapper[5133]: I1121 15:24:31.103836 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xqxtt" event={"ID":"01034df4-2fc6-4db2-8a45-f944f049f262","Type":"ContainerStarted","Data":"1167edb41368ee18f4edc995e8e2aef09a144f6430d3b3126f7f56f34916fcd6"} Nov 21 15:24:34 crc kubenswrapper[5133]: I1121 15:24:34.946332 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-volume1-0" podUID="3bf15e01-8975-4617-b353-163613da4bc5" containerName="cinder-volume" probeResult="failure" output="Get \"http://10.217.0.232:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:24:35 crc kubenswrapper[5133]: I1121 15:24:35.035327 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-backup-0" podUID="656b7d50-9049-415a-a4b1-08b531893110" containerName="cinder-backup" probeResult="failure" output="Get \"http://10.217.0.233:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:24:36 crc kubenswrapper[5133]: I1121 15:24:36.537208 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="46d511f0-7077-446e-b1f6-941fd109c41c" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.153:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:24:38 crc kubenswrapper[5133]: I1121 15:24:38.206443 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="404313b1-20dd-4623-ad80-ab9e2b521526" containerName="ceilometer-central-agent" probeResult="failure" output="command timed out" Nov 21 15:24:39 crc kubenswrapper[5133]: I1121 15:24:39.989180 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-volume1-0" podUID="3bf15e01-8975-4617-b353-163613da4bc5" containerName="cinder-volume" probeResult="failure" output="Get \"http://10.217.0.232:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:24:40 crc kubenswrapper[5133]: I1121 15:24:40.077281 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-backup-0" podUID="656b7d50-9049-415a-a4b1-08b531893110" containerName="cinder-backup" probeResult="failure" output="Get \"http://10.217.0.233:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:24:40 crc kubenswrapper[5133]: I1121 15:24:40.881907 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="404313b1-20dd-4623-ad80-ab9e2b521526" containerName="ceilometer-central-agent" probeResult="failure" output=< Nov 21 15:24:40 crc kubenswrapper[5133]: Unkown error: Expecting value: line 1 column 1 (char 0) Nov 21 15:24:40 crc kubenswrapper[5133]: > Nov 21 15:24:41 crc kubenswrapper[5133]: I1121 15:24:41.216530 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"0cb9ffeafef00afac56a59dc163a419de0e518b8c46e5190c87e02de2302575b"} Nov 21 15:24:41 crc kubenswrapper[5133]: I1121 15:24:41.578182 5133 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="46d511f0-7077-446e-b1f6-941fd109c41c" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.153:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:24:42 crc kubenswrapper[5133]: I1121 15:24:42.226391 5133 generic.go:334] "Generic (PLEG): container finished" podID="01034df4-2fc6-4db2-8a45-f944f049f262" containerID="ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5" exitCode=0 Nov 21 15:24:42 crc kubenswrapper[5133]: I1121 15:24:42.226446 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xqxtt" event={"ID":"01034df4-2fc6-4db2-8a45-f944f049f262","Type":"ContainerDied","Data":"ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5"} Nov 21 15:24:42 crc kubenswrapper[5133]: I1121 15:24:42.684255 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ceilometer-0" podUID="404313b1-20dd-4623-ad80-ab9e2b521526" containerName="ceilometer-central-agent" probeResult="failure" output=< Nov 21 15:24:42 crc kubenswrapper[5133]: Unkown error: Expecting value: line 1 column 1 (char 0) Nov 21 15:24:42 crc kubenswrapper[5133]: > Nov 21 15:24:42 crc kubenswrapper[5133]: I1121 15:24:42.684649 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ceilometer-0" Nov 21 15:24:42 crc kubenswrapper[5133]: I1121 15:24:42.685402 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ceilometer-central-agent" containerStatusID={"Type":"cri-o","ID":"5c20888c26e62ce81d102b766dd9812cc7fb315fb346cb04a43639e28354e6bf"} pod="openstack/ceilometer-0" containerMessage="Container ceilometer-central-agent failed liveness probe, will be restarted" Nov 21 15:24:42 crc kubenswrapper[5133]: I1121 15:24:42.685480 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="404313b1-20dd-4623-ad80-ab9e2b521526" containerName="ceilometer-central-agent" containerID="cri-o://5c20888c26e62ce81d102b766dd9812cc7fb315fb346cb04a43639e28354e6bf" gracePeriod=30 Nov 21 15:24:43 crc kubenswrapper[5133]: I1121 15:24:43.240851 5133 generic.go:334] "Generic (PLEG): container finished" podID="404313b1-20dd-4623-ad80-ab9e2b521526" containerID="5c20888c26e62ce81d102b766dd9812cc7fb315fb346cb04a43639e28354e6bf" exitCode=0 Nov 21 15:24:43 crc kubenswrapper[5133]: I1121 15:24:43.241275 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"404313b1-20dd-4623-ad80-ab9e2b521526","Type":"ContainerDied","Data":"5c20888c26e62ce81d102b766dd9812cc7fb315fb346cb04a43639e28354e6bf"} Nov 21 15:24:44 crc kubenswrapper[5133]: I1121 15:24:44.254118 5133 generic.go:334] "Generic (PLEG): container finished" podID="01034df4-2fc6-4db2-8a45-f944f049f262" containerID="b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be" exitCode=0 Nov 21 15:24:44 crc kubenswrapper[5133]: I1121 15:24:44.254209 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xqxtt" event={"ID":"01034df4-2fc6-4db2-8a45-f944f049f262","Type":"ContainerDied","Data":"b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be"} Nov 21 15:24:45 crc kubenswrapper[5133]: I1121 15:24:45.265620 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"404313b1-20dd-4623-ad80-ab9e2b521526","Type":"ContainerStarted","Data":"9337f13200df2713577c72f7b611fad68fb897f0ef65aee38981211e0fce0743"} Nov 21 15:24:46 crc kubenswrapper[5133]: I1121 15:24:46.280101 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xqxtt" event={"ID":"01034df4-2fc6-4db2-8a45-f944f049f262","Type":"ContainerStarted","Data":"e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab"} Nov 21 15:24:46 crc kubenswrapper[5133]: I1121 15:24:46.306955 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xqxtt" podStartSLOduration=14.444711188 podStartE2EDuration="17.306936165s" podCreationTimestamp="2025-11-21 15:24:29 +0000 UTC" firstStartedPulling="2025-11-21 15:24:42.229676483 +0000 UTC m=+6142.027508731" lastFinishedPulling="2025-11-21 15:24:45.09190146 +0000 UTC m=+6144.889733708" observedRunningTime="2025-11-21 15:24:46.296942282 +0000 UTC m=+6146.094774540" watchObservedRunningTime="2025-11-21 15:24:46.306936165 +0000 UTC m=+6146.104768413" Nov 21 15:24:49 crc kubenswrapper[5133]: I1121 15:24:49.727019 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:49 crc kubenswrapper[5133]: I1121 15:24:49.727580 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:49 crc kubenswrapper[5133]: I1121 15:24:49.771137 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:50 crc kubenswrapper[5133]: I1121 15:24:50.363744 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:50 crc kubenswrapper[5133]: I1121 15:24:50.429724 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xqxtt"] Nov 21 15:24:52 crc kubenswrapper[5133]: I1121 15:24:52.332678 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xqxtt" podUID="01034df4-2fc6-4db2-8a45-f944f049f262" containerName="registry-server" containerID="cri-o://e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab" gracePeriod=2 Nov 21 15:24:52 crc kubenswrapper[5133]: I1121 15:24:52.960128 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.021089 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knkzx\" (UniqueName: \"kubernetes.io/projected/01034df4-2fc6-4db2-8a45-f944f049f262-kube-api-access-knkzx\") pod \"01034df4-2fc6-4db2-8a45-f944f049f262\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.021288 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-catalog-content\") pod \"01034df4-2fc6-4db2-8a45-f944f049f262\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.021447 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-utilities\") pod \"01034df4-2fc6-4db2-8a45-f944f049f262\" (UID: \"01034df4-2fc6-4db2-8a45-f944f049f262\") " Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.022502 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-utilities" (OuterVolumeSpecName: "utilities") pod "01034df4-2fc6-4db2-8a45-f944f049f262" (UID: "01034df4-2fc6-4db2-8a45-f944f049f262"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.031769 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01034df4-2fc6-4db2-8a45-f944f049f262-kube-api-access-knkzx" (OuterVolumeSpecName: "kube-api-access-knkzx") pod "01034df4-2fc6-4db2-8a45-f944f049f262" (UID: "01034df4-2fc6-4db2-8a45-f944f049f262"). InnerVolumeSpecName "kube-api-access-knkzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.045098 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "01034df4-2fc6-4db2-8a45-f944f049f262" (UID: "01034df4-2fc6-4db2-8a45-f944f049f262"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.123475 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.123503 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01034df4-2fc6-4db2-8a45-f944f049f262-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.123512 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knkzx\" (UniqueName: \"kubernetes.io/projected/01034df4-2fc6-4db2-8a45-f944f049f262-kube-api-access-knkzx\") on node \"crc\" DevicePath \"\"" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.345042 5133 generic.go:334] "Generic (PLEG): container finished" podID="01034df4-2fc6-4db2-8a45-f944f049f262" containerID="e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab" exitCode=0 Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.345085 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xqxtt" event={"ID":"01034df4-2fc6-4db2-8a45-f944f049f262","Type":"ContainerDied","Data":"e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab"} Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.345115 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xqxtt" event={"ID":"01034df4-2fc6-4db2-8a45-f944f049f262","Type":"ContainerDied","Data":"1167edb41368ee18f4edc995e8e2aef09a144f6430d3b3126f7f56f34916fcd6"} Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.345137 5133 scope.go:117] "RemoveContainer" containerID="e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.345179 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xqxtt" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.371539 5133 scope.go:117] "RemoveContainer" containerID="b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.383645 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xqxtt"] Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.396990 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xqxtt"] Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.406447 5133 scope.go:117] "RemoveContainer" containerID="ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.451905 5133 scope.go:117] "RemoveContainer" containerID="e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab" Nov 21 15:24:53 crc kubenswrapper[5133]: E1121 15:24:53.452435 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab\": container with ID starting with e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab not found: ID does not exist" containerID="e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.452484 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab"} err="failed to get container status \"e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab\": rpc error: code = NotFound desc = could not find container \"e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab\": container with ID starting with e52401dc91d81432184fd3498337548d74b75c4197a6766d5f817f5d254f99ab not found: ID does not exist" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.452520 5133 scope.go:117] "RemoveContainer" containerID="b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be" Nov 21 15:24:53 crc kubenswrapper[5133]: E1121 15:24:53.452877 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be\": container with ID starting with b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be not found: ID does not exist" containerID="b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.452898 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be"} err="failed to get container status \"b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be\": rpc error: code = NotFound desc = could not find container \"b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be\": container with ID starting with b63d019b480112399f7a28dba5dd4dafbee6e4cd35349dd98e7b32d2d67cd9be not found: ID does not exist" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.452912 5133 scope.go:117] "RemoveContainer" containerID="ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5" Nov 21 15:24:53 crc kubenswrapper[5133]: E1121 15:24:53.453257 5133 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5\": container with ID starting with ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5 not found: ID does not exist" containerID="ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5" Nov 21 15:24:53 crc kubenswrapper[5133]: I1121 15:24:53.453313 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5"} err="failed to get container status \"ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5\": rpc error: code = NotFound desc = could not find container \"ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5\": container with ID starting with ef267e5bca7d92953c0f4378a4ff0ba2e3eb8388b5094584189db358b7fec4f5 not found: ID does not exist" Nov 21 15:24:54 crc kubenswrapper[5133]: I1121 15:24:54.477968 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01034df4-2fc6-4db2-8a45-f944f049f262" path="/var/lib/kubelet/pods/01034df4-2fc6-4db2-8a45-f944f049f262/volumes" Nov 21 15:26:53 crc kubenswrapper[5133]: I1121 15:26:53.310814 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:26:53 crc kubenswrapper[5133]: I1121 15:26:53.311773 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:27:23 crc kubenswrapper[5133]: I1121 15:27:23.310601 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:27:23 crc kubenswrapper[5133]: I1121 15:27:23.311338 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:27:53 crc kubenswrapper[5133]: I1121 15:27:53.310455 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:27:53 crc kubenswrapper[5133]: I1121 15:27:53.311213 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:27:53 crc kubenswrapper[5133]: I1121 15:27:53.311288 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 15:27:53 crc kubenswrapper[5133]: I1121 15:27:53.312574 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0cb9ffeafef00afac56a59dc163a419de0e518b8c46e5190c87e02de2302575b"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:27:53 crc kubenswrapper[5133]: I1121 15:27:53.312691 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://0cb9ffeafef00afac56a59dc163a419de0e518b8c46e5190c87e02de2302575b" gracePeriod=600 Nov 21 15:27:56 crc kubenswrapper[5133]: I1121 15:27:56.682215 5133 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/frr-k8s-webhook-server-6998585d5-9drp9" podUID="67474cce-10bd-4da6-895f-a7e465d362a6" containerName="frr-k8s-webhook-server" probeResult="failure" output="Get \"http://10.217.0.51:7572/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:27:58 crc kubenswrapper[5133]: I1121 15:27:58.947324 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-volume1-0" podUID="3bf15e01-8975-4617-b353-163613da4bc5" containerName="cinder-volume" probeResult="failure" output="Get \"http://10.217.0.232:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:27:59 crc kubenswrapper[5133]: I1121 15:27:59.038350 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-backup-0" podUID="656b7d50-9049-415a-a4b1-08b531893110" containerName="cinder-backup" probeResult="failure" output="Get \"http://10.217.0.233:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:28:00 crc kubenswrapper[5133]: I1121 15:28:00.538263 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="46d511f0-7077-446e-b1f6-941fd109c41c" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.153:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:28:00 crc kubenswrapper[5133]: I1121 15:28:00.987789 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-config-operator_machine-config-daemon-xxlvp_52f5a729-05d1-4f84-a216-1df3233af57d/machine-config-daemon/20.log" Nov 21 15:28:00 crc kubenswrapper[5133]: I1121 15:28:00.989397 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="0cb9ffeafef00afac56a59dc163a419de0e518b8c46e5190c87e02de2302575b" exitCode=-1 Nov 21 15:28:00 crc kubenswrapper[5133]: I1121 15:28:00.989442 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"0cb9ffeafef00afac56a59dc163a419de0e518b8c46e5190c87e02de2302575b"} Nov 21 15:28:00 crc kubenswrapper[5133]: I1121 15:28:00.989476 5133 scope.go:117] "RemoveContainer" containerID="1d157b3c6af281d2ce3bea7313a2dc72022a8c1fac4535bc51f830a48161083c" Nov 21 15:28:03 crc kubenswrapper[5133]: I1121 15:28:03.990253 5133 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openstack/cinder-volume-volume1-0" podUID="3bf15e01-8975-4617-b353-163613da4bc5" containerName="cinder-volume" probeResult="failure" output="Get \"http://10.217.0.232:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:28:04 crc kubenswrapper[5133]: I1121 15:28:04.079217 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-backup-0" podUID="656b7d50-9049-415a-a4b1-08b531893110" containerName="cinder-backup" probeResult="failure" output="Get \"http://10.217.0.233:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:28:05 crc kubenswrapper[5133]: I1121 15:28:05.581483 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="46d511f0-7077-446e-b1f6-941fd109c41c" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.153:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:28:09 crc kubenswrapper[5133]: I1121 15:28:09.034241 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-volume-volume1-0" podUID="3bf15e01-8975-4617-b353-163613da4bc5" containerName="cinder-volume" probeResult="failure" output="Get \"http://10.217.0.232:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 21 15:28:09 crc kubenswrapper[5133]: I1121 15:28:09.035015 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Nov 21 15:28:09 crc kubenswrapper[5133]: I1121 15:28:09.036036 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="cinder-volume" containerStatusID={"Type":"cri-o","ID":"6137efce3af68d874e0e5e04ef7a04c463d53b719a0d5e15cc4c152ed7bba220"} pod="openstack/cinder-volume-volume1-0" containerMessage="Container cinder-volume failed liveness probe, will be restarted" Nov 21 15:28:09 crc kubenswrapper[5133]: I1121 15:28:09.036100 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-volume-volume1-0" podUID="3bf15e01-8975-4617-b353-163613da4bc5" containerName="cinder-volume" containerID="cri-o://6137efce3af68d874e0e5e04ef7a04c463d53b719a0d5e15cc4c152ed7bba220" gracePeriod=30 Nov 21 15:28:09 crc kubenswrapper[5133]: I1121 15:28:09.080629 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9"} Nov 21 15:28:14 crc kubenswrapper[5133]: I1121 15:28:14.137572 5133 generic.go:334] "Generic (PLEG): container finished" podID="3bf15e01-8975-4617-b353-163613da4bc5" containerID="6137efce3af68d874e0e5e04ef7a04c463d53b719a0d5e15cc4c152ed7bba220" exitCode=0 Nov 21 15:28:14 crc kubenswrapper[5133]: I1121 15:28:14.137652 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"3bf15e01-8975-4617-b353-163613da4bc5","Type":"ContainerDied","Data":"6137efce3af68d874e0e5e04ef7a04c463d53b719a0d5e15cc4c152ed7bba220"} Nov 21 15:28:15 crc kubenswrapper[5133]: I1121 15:28:15.149610 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"3bf15e01-8975-4617-b353-163613da4bc5","Type":"ContainerStarted","Data":"438e8ce8e8dee24f388d613d3cc9a67df66b293f538e5e18188c9b8b8cacbaa6"} Nov 21 
15:28:18 crc kubenswrapper[5133]: I1121 15:28:18.905202 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0" Nov 21 15:28:23 crc kubenswrapper[5133]: I1121 15:28:23.911885 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0" Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.914040 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4x8sf"] Nov 21 15:29:30 crc kubenswrapper[5133]: E1121 15:29:30.915502 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01034df4-2fc6-4db2-8a45-f944f049f262" containerName="extract-utilities" Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.915521 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="01034df4-2fc6-4db2-8a45-f944f049f262" containerName="extract-utilities" Nov 21 15:29:30 crc kubenswrapper[5133]: E1121 15:29:30.915565 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01034df4-2fc6-4db2-8a45-f944f049f262" containerName="extract-content" Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.915575 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="01034df4-2fc6-4db2-8a45-f944f049f262" containerName="extract-content" Nov 21 15:29:30 crc kubenswrapper[5133]: E1121 15:29:30.915593 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01034df4-2fc6-4db2-8a45-f944f049f262" containerName="registry-server" Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.915602 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="01034df4-2fc6-4db2-8a45-f944f049f262" containerName="registry-server" Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.915816 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="01034df4-2fc6-4db2-8a45-f944f049f262" containerName="registry-server" Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.917813 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.927229 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4x8sf"] Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.956806 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-utilities\") pod \"community-operators-4x8sf\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.956955 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9fjg\" (UniqueName: \"kubernetes.io/projected/8219de3f-0c86-4494-aa7c-d9cf106298f7-kube-api-access-v9fjg\") pod \"community-operators-4x8sf\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:30 crc kubenswrapper[5133]: I1121 15:29:30.957187 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-catalog-content\") pod \"community-operators-4x8sf\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:31 crc kubenswrapper[5133]: I1121 15:29:31.063153 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-utilities\") pod \"community-operators-4x8sf\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:31 crc kubenswrapper[5133]: I1121 15:29:31.063314 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9fjg\" (UniqueName: \"kubernetes.io/projected/8219de3f-0c86-4494-aa7c-d9cf106298f7-kube-api-access-v9fjg\") pod \"community-operators-4x8sf\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:31 crc kubenswrapper[5133]: I1121 15:29:31.063438 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-catalog-content\") pod \"community-operators-4x8sf\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:31 crc kubenswrapper[5133]: I1121 15:29:31.064280 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-catalog-content\") pod \"community-operators-4x8sf\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:31 crc kubenswrapper[5133]: I1121 15:29:31.064558 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-utilities\") pod \"community-operators-4x8sf\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:31 crc kubenswrapper[5133]: I1121 15:29:31.101473 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-v9fjg\" (UniqueName: \"kubernetes.io/projected/8219de3f-0c86-4494-aa7c-d9cf106298f7-kube-api-access-v9fjg\") pod \"community-operators-4x8sf\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:31 crc kubenswrapper[5133]: I1121 15:29:31.250133 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:31 crc kubenswrapper[5133]: I1121 15:29:31.721063 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4x8sf"] Nov 21 15:29:31 crc kubenswrapper[5133]: W1121 15:29:31.722785 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8219de3f_0c86_4494_aa7c_d9cf106298f7.slice/crio-243705603a1ead7f7d625188e10f875210edef692988d0814e30d82aca2ccce5 WatchSource:0}: Error finding container 243705603a1ead7f7d625188e10f875210edef692988d0814e30d82aca2ccce5: Status 404 returned error can't find the container with id 243705603a1ead7f7d625188e10f875210edef692988d0814e30d82aca2ccce5 Nov 21 15:29:31 crc kubenswrapper[5133]: I1121 15:29:31.870923 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4x8sf" event={"ID":"8219de3f-0c86-4494-aa7c-d9cf106298f7","Type":"ContainerStarted","Data":"243705603a1ead7f7d625188e10f875210edef692988d0814e30d82aca2ccce5"} Nov 21 15:29:32 crc kubenswrapper[5133]: I1121 15:29:32.884470 5133 generic.go:334] "Generic (PLEG): container finished" podID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerID="43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c" exitCode=0 Nov 21 15:29:32 crc kubenswrapper[5133]: I1121 15:29:32.884531 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4x8sf" event={"ID":"8219de3f-0c86-4494-aa7c-d9cf106298f7","Type":"ContainerDied","Data":"43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c"} Nov 21 15:29:32 crc kubenswrapper[5133]: I1121 15:29:32.887736 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:29:33 crc kubenswrapper[5133]: I1121 15:29:33.895684 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4x8sf" event={"ID":"8219de3f-0c86-4494-aa7c-d9cf106298f7","Type":"ContainerStarted","Data":"020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270"} Nov 21 15:29:34 crc kubenswrapper[5133]: I1121 15:29:34.914746 5133 generic.go:334] "Generic (PLEG): container finished" podID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerID="020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270" exitCode=0 Nov 21 15:29:34 crc kubenswrapper[5133]: I1121 15:29:34.915144 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4x8sf" event={"ID":"8219de3f-0c86-4494-aa7c-d9cf106298f7","Type":"ContainerDied","Data":"020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270"} Nov 21 15:29:35 crc kubenswrapper[5133]: I1121 15:29:35.926655 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4x8sf" event={"ID":"8219de3f-0c86-4494-aa7c-d9cf106298f7","Type":"ContainerStarted","Data":"1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7"} Nov 21 15:29:35 crc kubenswrapper[5133]: I1121 
15:29:35.947239 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4x8sf" podStartSLOduration=3.486514089 podStartE2EDuration="5.947220959s" podCreationTimestamp="2025-11-21 15:29:30 +0000 UTC" firstStartedPulling="2025-11-21 15:29:32.887349932 +0000 UTC m=+6432.685182210" lastFinishedPulling="2025-11-21 15:29:35.348056832 +0000 UTC m=+6435.145889080" observedRunningTime="2025-11-21 15:29:35.941783116 +0000 UTC m=+6435.739615364" watchObservedRunningTime="2025-11-21 15:29:35.947220959 +0000 UTC m=+6435.745053207" Nov 21 15:29:41 crc kubenswrapper[5133]: I1121 15:29:41.250563 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:41 crc kubenswrapper[5133]: I1121 15:29:41.251088 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:41 crc kubenswrapper[5133]: I1121 15:29:41.300221 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:42 crc kubenswrapper[5133]: I1121 15:29:42.067616 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:42 crc kubenswrapper[5133]: I1121 15:29:42.124733 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4x8sf"] Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.017203 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4x8sf" podUID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerName="registry-server" containerID="cri-o://1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7" gracePeriod=2 Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.508124 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.545364 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9fjg\" (UniqueName: \"kubernetes.io/projected/8219de3f-0c86-4494-aa7c-d9cf106298f7-kube-api-access-v9fjg\") pod \"8219de3f-0c86-4494-aa7c-d9cf106298f7\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.545960 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-utilities\") pod \"8219de3f-0c86-4494-aa7c-d9cf106298f7\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.546168 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-catalog-content\") pod \"8219de3f-0c86-4494-aa7c-d9cf106298f7\" (UID: \"8219de3f-0c86-4494-aa7c-d9cf106298f7\") " Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.550670 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-utilities" (OuterVolumeSpecName: "utilities") pod "8219de3f-0c86-4494-aa7c-d9cf106298f7" (UID: "8219de3f-0c86-4494-aa7c-d9cf106298f7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.564877 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8219de3f-0c86-4494-aa7c-d9cf106298f7-kube-api-access-v9fjg" (OuterVolumeSpecName: "kube-api-access-v9fjg") pod "8219de3f-0c86-4494-aa7c-d9cf106298f7" (UID: "8219de3f-0c86-4494-aa7c-d9cf106298f7"). InnerVolumeSpecName "kube-api-access-v9fjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.624256 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8219de3f-0c86-4494-aa7c-d9cf106298f7" (UID: "8219de3f-0c86-4494-aa7c-d9cf106298f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.649163 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.649204 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9fjg\" (UniqueName: \"kubernetes.io/projected/8219de3f-0c86-4494-aa7c-d9cf106298f7-kube-api-access-v9fjg\") on node \"crc\" DevicePath \"\"" Nov 21 15:29:44 crc kubenswrapper[5133]: I1121 15:29:44.649222 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8219de3f-0c86-4494-aa7c-d9cf106298f7-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.028884 5133 generic.go:334] "Generic (PLEG): container finished" podID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerID="1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7" exitCode=0 Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.028923 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4x8sf" event={"ID":"8219de3f-0c86-4494-aa7c-d9cf106298f7","Type":"ContainerDied","Data":"1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7"} Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.028948 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4x8sf" event={"ID":"8219de3f-0c86-4494-aa7c-d9cf106298f7","Type":"ContainerDied","Data":"243705603a1ead7f7d625188e10f875210edef692988d0814e30d82aca2ccce5"} Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.028969 5133 scope.go:117] "RemoveContainer" containerID="1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.029035 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4x8sf" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.068160 5133 scope.go:117] "RemoveContainer" containerID="020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.097554 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4x8sf"] Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.107454 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4x8sf"] Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.116298 5133 scope.go:117] "RemoveContainer" containerID="43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.177556 5133 scope.go:117] "RemoveContainer" containerID="1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7" Nov 21 15:29:45 crc kubenswrapper[5133]: E1121 15:29:45.178065 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7\": container with ID starting with 1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7 not found: ID does not exist" containerID="1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.178152 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7"} err="failed to get container status \"1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7\": rpc error: code = NotFound desc = could not find container \"1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7\": container with ID starting with 1f32d9a547c3e89b1dbc117b3367dcef202e778d29472ba17c63c7c527061af7 not found: ID does not exist" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.178207 5133 scope.go:117] "RemoveContainer" containerID="020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270" Nov 21 15:29:45 crc kubenswrapper[5133]: E1121 15:29:45.178654 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270\": container with ID starting with 020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270 not found: ID does not exist" containerID="020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.178711 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270"} err="failed to get container status \"020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270\": rpc error: code = NotFound desc = could not find container \"020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270\": container with ID starting with 020e23124e8b35852fa9043ea4ae2a02f2137e9f4e20c5dcb4449941434eb270 not found: ID does not exist" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.178729 5133 scope.go:117] "RemoveContainer" containerID="43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c" Nov 21 15:29:45 crc kubenswrapper[5133]: E1121 15:29:45.179034 5133 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c\": container with ID starting with 43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c not found: ID does not exist" containerID="43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c" Nov 21 15:29:45 crc kubenswrapper[5133]: I1121 15:29:45.179075 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c"} err="failed to get container status \"43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c\": rpc error: code = NotFound desc = could not find container \"43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c\": container with ID starting with 43b130fe06e215ca10301644dacf95faf3dd6f50279a205990effb6fdf91790c not found: ID does not exist" Nov 21 15:29:46 crc kubenswrapper[5133]: I1121 15:29:46.470025 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8219de3f-0c86-4494-aa7c-d9cf106298f7" path="/var/lib/kubelet/pods/8219de3f-0c86-4494-aa7c-d9cf106298f7/volumes" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.156513 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq"] Nov 21 15:30:00 crc kubenswrapper[5133]: E1121 15:30:00.157471 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerName="extract-utilities" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.157486 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerName="extract-utilities" Nov 21 15:30:00 crc kubenswrapper[5133]: E1121 15:30:00.157521 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerName="registry-server" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.157528 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerName="registry-server" Nov 21 15:30:00 crc kubenswrapper[5133]: E1121 15:30:00.157554 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerName="extract-content" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.157562 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerName="extract-content" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.157766 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="8219de3f-0c86-4494-aa7c-d9cf106298f7" containerName="registry-server" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.158518 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.160537 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.161125 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.169201 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq"] Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.259833 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12ff5961-2b53-4177-ba95-e086ac627598-config-volume\") pod \"collect-profiles-29395650-hccbq\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.260258 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12ff5961-2b53-4177-ba95-e086ac627598-secret-volume\") pod \"collect-profiles-29395650-hccbq\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.260385 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnwbw\" (UniqueName: \"kubernetes.io/projected/12ff5961-2b53-4177-ba95-e086ac627598-kube-api-access-dnwbw\") pod \"collect-profiles-29395650-hccbq\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.362274 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12ff5961-2b53-4177-ba95-e086ac627598-secret-volume\") pod \"collect-profiles-29395650-hccbq\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.362403 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnwbw\" (UniqueName: \"kubernetes.io/projected/12ff5961-2b53-4177-ba95-e086ac627598-kube-api-access-dnwbw\") pod \"collect-profiles-29395650-hccbq\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.362453 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12ff5961-2b53-4177-ba95-e086ac627598-config-volume\") pod \"collect-profiles-29395650-hccbq\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.363343 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12ff5961-2b53-4177-ba95-e086ac627598-config-volume\") pod 
\"collect-profiles-29395650-hccbq\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.368898 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12ff5961-2b53-4177-ba95-e086ac627598-secret-volume\") pod \"collect-profiles-29395650-hccbq\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.380176 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnwbw\" (UniqueName: \"kubernetes.io/projected/12ff5961-2b53-4177-ba95-e086ac627598-kube-api-access-dnwbw\") pod \"collect-profiles-29395650-hccbq\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.488095 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:00 crc kubenswrapper[5133]: I1121 15:30:00.937770 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq"] Nov 21 15:30:01 crc kubenswrapper[5133]: I1121 15:30:01.199395 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" event={"ID":"12ff5961-2b53-4177-ba95-e086ac627598","Type":"ContainerStarted","Data":"1d99376ea00c3c3c7c8cc6c4711907090c3cbb04f5dadb1d46d33998db8b8825"} Nov 21 15:30:01 crc kubenswrapper[5133]: I1121 15:30:01.199776 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" event={"ID":"12ff5961-2b53-4177-ba95-e086ac627598","Type":"ContainerStarted","Data":"8ab9fe50035a4269dc0d97c50f22a0da845c53a86a39bbb90cbf36d14331e0da"} Nov 21 15:30:01 crc kubenswrapper[5133]: I1121 15:30:01.220448 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" podStartSLOduration=1.220424343 podStartE2EDuration="1.220424343s" podCreationTimestamp="2025-11-21 15:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:30:01.212172095 +0000 UTC m=+6461.010004343" watchObservedRunningTime="2025-11-21 15:30:01.220424343 +0000 UTC m=+6461.018256591" Nov 21 15:30:02 crc kubenswrapper[5133]: I1121 15:30:02.210614 5133 generic.go:334] "Generic (PLEG): container finished" podID="12ff5961-2b53-4177-ba95-e086ac627598" containerID="1d99376ea00c3c3c7c8cc6c4711907090c3cbb04f5dadb1d46d33998db8b8825" exitCode=0 Nov 21 15:30:02 crc kubenswrapper[5133]: I1121 15:30:02.210742 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" event={"ID":"12ff5961-2b53-4177-ba95-e086ac627598","Type":"ContainerDied","Data":"1d99376ea00c3c3c7c8cc6c4711907090c3cbb04f5dadb1d46d33998db8b8825"} Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.658215 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.831976 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12ff5961-2b53-4177-ba95-e086ac627598-config-volume\") pod \"12ff5961-2b53-4177-ba95-e086ac627598\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.832154 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnwbw\" (UniqueName: \"kubernetes.io/projected/12ff5961-2b53-4177-ba95-e086ac627598-kube-api-access-dnwbw\") pod \"12ff5961-2b53-4177-ba95-e086ac627598\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.832198 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12ff5961-2b53-4177-ba95-e086ac627598-secret-volume\") pod \"12ff5961-2b53-4177-ba95-e086ac627598\" (UID: \"12ff5961-2b53-4177-ba95-e086ac627598\") " Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.832574 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12ff5961-2b53-4177-ba95-e086ac627598-config-volume" (OuterVolumeSpecName: "config-volume") pod "12ff5961-2b53-4177-ba95-e086ac627598" (UID: "12ff5961-2b53-4177-ba95-e086ac627598"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.838269 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12ff5961-2b53-4177-ba95-e086ac627598-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "12ff5961-2b53-4177-ba95-e086ac627598" (UID: "12ff5961-2b53-4177-ba95-e086ac627598"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.838954 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12ff5961-2b53-4177-ba95-e086ac627598-kube-api-access-dnwbw" (OuterVolumeSpecName: "kube-api-access-dnwbw") pod "12ff5961-2b53-4177-ba95-e086ac627598" (UID: "12ff5961-2b53-4177-ba95-e086ac627598"). InnerVolumeSpecName "kube-api-access-dnwbw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.935093 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnwbw\" (UniqueName: \"kubernetes.io/projected/12ff5961-2b53-4177-ba95-e086ac627598-kube-api-access-dnwbw\") on node \"crc\" DevicePath \"\"" Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.935137 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/12ff5961-2b53-4177-ba95-e086ac627598-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:30:03 crc kubenswrapper[5133]: I1121 15:30:03.935150 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12ff5961-2b53-4177-ba95-e086ac627598-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:30:04 crc kubenswrapper[5133]: I1121 15:30:04.231096 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" event={"ID":"12ff5961-2b53-4177-ba95-e086ac627598","Type":"ContainerDied","Data":"8ab9fe50035a4269dc0d97c50f22a0da845c53a86a39bbb90cbf36d14331e0da"} Nov 21 15:30:04 crc kubenswrapper[5133]: I1121 15:30:04.231391 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ab9fe50035a4269dc0d97c50f22a0da845c53a86a39bbb90cbf36d14331e0da" Nov 21 15:30:04 crc kubenswrapper[5133]: I1121 15:30:04.231126 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395650-hccbq" Nov 21 15:30:04 crc kubenswrapper[5133]: I1121 15:30:04.290602 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk"] Nov 21 15:30:04 crc kubenswrapper[5133]: I1121 15:30:04.298218 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395605-vl8bk"] Nov 21 15:30:04 crc kubenswrapper[5133]: I1121 15:30:04.467652 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e73d3a2d-e8eb-422d-84f8-e938f8594392" path="/var/lib/kubelet/pods/e73d3a2d-e8eb-422d-84f8-e938f8594392/volumes" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.226058 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xsl7c"] Nov 21 15:30:19 crc kubenswrapper[5133]: E1121 15:30:19.227878 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12ff5961-2b53-4177-ba95-e086ac627598" containerName="collect-profiles" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.227968 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="12ff5961-2b53-4177-ba95-e086ac627598" containerName="collect-profiles" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.228254 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="12ff5961-2b53-4177-ba95-e086ac627598" containerName="collect-profiles" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.229655 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.239327 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xsl7c"] Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.328136 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cg4t\" (UniqueName: \"kubernetes.io/projected/114fb0c3-73de-486f-8de8-01ee44f56547-kube-api-access-6cg4t\") pod \"redhat-operators-xsl7c\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.328219 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-utilities\") pod \"redhat-operators-xsl7c\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.328548 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-catalog-content\") pod \"redhat-operators-xsl7c\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.430741 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-catalog-content\") pod \"redhat-operators-xsl7c\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.430854 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cg4t\" (UniqueName: \"kubernetes.io/projected/114fb0c3-73de-486f-8de8-01ee44f56547-kube-api-access-6cg4t\") pod \"redhat-operators-xsl7c\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.430904 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-utilities\") pod \"redhat-operators-xsl7c\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.431464 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-utilities\") pod \"redhat-operators-xsl7c\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.431506 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-catalog-content\") pod \"redhat-operators-xsl7c\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.448931 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6cg4t\" (UniqueName: \"kubernetes.io/projected/114fb0c3-73de-486f-8de8-01ee44f56547-kube-api-access-6cg4t\") pod \"redhat-operators-xsl7c\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:19 crc kubenswrapper[5133]: I1121 15:30:19.567070 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:20 crc kubenswrapper[5133]: I1121 15:30:20.080607 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xsl7c"] Nov 21 15:30:20 crc kubenswrapper[5133]: I1121 15:30:20.378848 5133 generic.go:334] "Generic (PLEG): container finished" podID="114fb0c3-73de-486f-8de8-01ee44f56547" containerID="6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b" exitCode=0 Nov 21 15:30:20 crc kubenswrapper[5133]: I1121 15:30:20.378893 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xsl7c" event={"ID":"114fb0c3-73de-486f-8de8-01ee44f56547","Type":"ContainerDied","Data":"6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b"} Nov 21 15:30:20 crc kubenswrapper[5133]: I1121 15:30:20.378956 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xsl7c" event={"ID":"114fb0c3-73de-486f-8de8-01ee44f56547","Type":"ContainerStarted","Data":"478624bdd77c7f3d31e2508e3495d6c4bee7b2c9b114005362c094c73e4cd24f"} Nov 21 15:30:22 crc kubenswrapper[5133]: I1121 15:30:22.400047 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xsl7c" event={"ID":"114fb0c3-73de-486f-8de8-01ee44f56547","Type":"ContainerStarted","Data":"92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563"} Nov 21 15:30:23 crc kubenswrapper[5133]: I1121 15:30:23.311238 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:30:23 crc kubenswrapper[5133]: I1121 15:30:23.311684 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:30:23 crc kubenswrapper[5133]: I1121 15:30:23.409894 5133 generic.go:334] "Generic (PLEG): container finished" podID="114fb0c3-73de-486f-8de8-01ee44f56547" containerID="92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563" exitCode=0 Nov 21 15:30:23 crc kubenswrapper[5133]: I1121 15:30:23.409941 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xsl7c" event={"ID":"114fb0c3-73de-486f-8de8-01ee44f56547","Type":"ContainerDied","Data":"92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563"} Nov 21 15:30:24 crc kubenswrapper[5133]: I1121 15:30:24.426691 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xsl7c" event={"ID":"114fb0c3-73de-486f-8de8-01ee44f56547","Type":"ContainerStarted","Data":"d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f"} Nov 21 15:30:24 crc kubenswrapper[5133]: I1121 15:30:24.461749 5133 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xsl7c" podStartSLOduration=1.9848490810000001 podStartE2EDuration="5.461718064s" podCreationTimestamp="2025-11-21 15:30:19 +0000 UTC" firstStartedPulling="2025-11-21 15:30:20.381097082 +0000 UTC m=+6480.178929330" lastFinishedPulling="2025-11-21 15:30:23.857966025 +0000 UTC m=+6483.655798313" observedRunningTime="2025-11-21 15:30:24.450415006 +0000 UTC m=+6484.248247264" watchObservedRunningTime="2025-11-21 15:30:24.461718064 +0000 UTC m=+6484.259550322" Nov 21 15:30:29 crc kubenswrapper[5133]: I1121 15:30:29.567404 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:29 crc kubenswrapper[5133]: I1121 15:30:29.568794 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:30 crc kubenswrapper[5133]: I1121 15:30:30.615788 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xsl7c" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" containerName="registry-server" probeResult="failure" output=< Nov 21 15:30:30 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 15:30:30 crc kubenswrapper[5133]: > Nov 21 15:30:39 crc kubenswrapper[5133]: I1121 15:30:39.626507 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:39 crc kubenswrapper[5133]: I1121 15:30:39.675204 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:39 crc kubenswrapper[5133]: I1121 15:30:39.865106 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xsl7c"] Nov 21 15:30:41 crc kubenswrapper[5133]: I1121 15:30:41.584352 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xsl7c" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" containerName="registry-server" containerID="cri-o://d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f" gracePeriod=2 Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.094690 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.191484 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-utilities\") pod \"114fb0c3-73de-486f-8de8-01ee44f56547\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.191885 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cg4t\" (UniqueName: \"kubernetes.io/projected/114fb0c3-73de-486f-8de8-01ee44f56547-kube-api-access-6cg4t\") pod \"114fb0c3-73de-486f-8de8-01ee44f56547\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.191969 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-catalog-content\") pod \"114fb0c3-73de-486f-8de8-01ee44f56547\" (UID: \"114fb0c3-73de-486f-8de8-01ee44f56547\") " Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.198260 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/114fb0c3-73de-486f-8de8-01ee44f56547-kube-api-access-6cg4t" (OuterVolumeSpecName: "kube-api-access-6cg4t") pod "114fb0c3-73de-486f-8de8-01ee44f56547" (UID: "114fb0c3-73de-486f-8de8-01ee44f56547"). InnerVolumeSpecName "kube-api-access-6cg4t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.198705 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-utilities" (OuterVolumeSpecName: "utilities") pod "114fb0c3-73de-486f-8de8-01ee44f56547" (UID: "114fb0c3-73de-486f-8de8-01ee44f56547"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.290390 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "114fb0c3-73de-486f-8de8-01ee44f56547" (UID: "114fb0c3-73de-486f-8de8-01ee44f56547"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.294095 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.294135 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/114fb0c3-73de-486f-8de8-01ee44f56547-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.294149 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cg4t\" (UniqueName: \"kubernetes.io/projected/114fb0c3-73de-486f-8de8-01ee44f56547-kube-api-access-6cg4t\") on node \"crc\" DevicePath \"\"" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.596441 5133 generic.go:334] "Generic (PLEG): container finished" podID="114fb0c3-73de-486f-8de8-01ee44f56547" containerID="d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f" exitCode=0 Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.596492 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xsl7c" event={"ID":"114fb0c3-73de-486f-8de8-01ee44f56547","Type":"ContainerDied","Data":"d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f"} Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.596510 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xsl7c" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.596537 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xsl7c" event={"ID":"114fb0c3-73de-486f-8de8-01ee44f56547","Type":"ContainerDied","Data":"478624bdd77c7f3d31e2508e3495d6c4bee7b2c9b114005362c094c73e4cd24f"} Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.596563 5133 scope.go:117] "RemoveContainer" containerID="d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.628867 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xsl7c"] Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.635158 5133 scope.go:117] "RemoveContainer" containerID="92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.641579 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xsl7c"] Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.661408 5133 scope.go:117] "RemoveContainer" containerID="6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.706323 5133 scope.go:117] "RemoveContainer" containerID="d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f" Nov 21 15:30:42 crc kubenswrapper[5133]: E1121 15:30:42.706941 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f\": container with ID starting with d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f not found: ID does not exist" containerID="d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.706992 5133 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f"} err="failed to get container status \"d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f\": rpc error: code = NotFound desc = could not find container \"d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f\": container with ID starting with d86a7c4cbee6159c944172f21a176f5b2a35c571fa174094f5d9d572f21c342f not found: ID does not exist" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.707052 5133 scope.go:117] "RemoveContainer" containerID="92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563" Nov 21 15:30:42 crc kubenswrapper[5133]: E1121 15:30:42.707596 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563\": container with ID starting with 92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563 not found: ID does not exist" containerID="92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.707638 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563"} err="failed to get container status \"92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563\": rpc error: code = NotFound desc = could not find container \"92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563\": container with ID starting with 92a682ac51b6ccff2b2ec29a80fdb6caf61293f860806803f685173023af4563 not found: ID does not exist" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.707667 5133 scope.go:117] "RemoveContainer" containerID="6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b" Nov 21 15:30:42 crc kubenswrapper[5133]: E1121 15:30:42.708087 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b\": container with ID starting with 6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b not found: ID does not exist" containerID="6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b" Nov 21 15:30:42 crc kubenswrapper[5133]: I1121 15:30:42.708109 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b"} err="failed to get container status \"6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b\": rpc error: code = NotFound desc = could not find container \"6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b\": container with ID starting with 6bb8272a1554fb92c66db27ee3bf87defd48f9c375782abdaafd717749fee67b not found: ID does not exist" Nov 21 15:30:43 crc kubenswrapper[5133]: I1121 15:30:43.676942 5133 scope.go:117] "RemoveContainer" containerID="cf6eb7d4068c3ae9a3d124af01410353e736b42fb84428f9ce10bcbe4f152c6f" Nov 21 15:30:44 crc kubenswrapper[5133]: I1121 15:30:44.475611 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" path="/var/lib/kubelet/pods/114fb0c3-73de-486f-8de8-01ee44f56547/volumes" Nov 21 15:30:53 crc kubenswrapper[5133]: I1121 15:30:53.310947 5133 patch_prober.go:28] interesting 
pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:30:53 crc kubenswrapper[5133]: I1121 15:30:53.311689 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:31:23 crc kubenswrapper[5133]: I1121 15:31:23.311263 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:31:23 crc kubenswrapper[5133]: I1121 15:31:23.312094 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:31:23 crc kubenswrapper[5133]: I1121 15:31:23.312182 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 15:31:23 crc kubenswrapper[5133]: I1121 15:31:23.313485 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:31:23 crc kubenswrapper[5133]: I1121 15:31:23.313579 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" gracePeriod=600 Nov 21 15:31:23 crc kubenswrapper[5133]: E1121 15:31:23.437278 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:31:24 crc kubenswrapper[5133]: I1121 15:31:24.039873 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" exitCode=0 Nov 21 15:31:24 crc kubenswrapper[5133]: I1121 15:31:24.040457 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9"} Nov 21 15:31:24 crc kubenswrapper[5133]: 
I1121 15:31:24.040651 5133 scope.go:117] "RemoveContainer" containerID="0cb9ffeafef00afac56a59dc163a419de0e518b8c46e5190c87e02de2302575b" Nov 21 15:31:24 crc kubenswrapper[5133]: I1121 15:31:24.041524 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:31:24 crc kubenswrapper[5133]: E1121 15:31:24.044617 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:31:36 crc kubenswrapper[5133]: I1121 15:31:36.458556 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:31:36 crc kubenswrapper[5133]: E1121 15:31:36.459699 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:31:48 crc kubenswrapper[5133]: I1121 15:31:48.458249 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:31:48 crc kubenswrapper[5133]: E1121 15:31:48.459178 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:32:03 crc kubenswrapper[5133]: I1121 15:32:03.458245 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:32:03 crc kubenswrapper[5133]: E1121 15:32:03.459681 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:32:17 crc kubenswrapper[5133]: I1121 15:32:17.457460 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:32:17 crc kubenswrapper[5133]: E1121 15:32:17.458172 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 
15:32:21.493913 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rl6wh"] Nov 21 15:32:21 crc kubenswrapper[5133]: E1121 15:32:21.496301 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" containerName="extract-utilities" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.496415 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" containerName="extract-utilities" Nov 21 15:32:21 crc kubenswrapper[5133]: E1121 15:32:21.496513 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" containerName="extract-content" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.496582 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" containerName="extract-content" Nov 21 15:32:21 crc kubenswrapper[5133]: E1121 15:32:21.496684 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" containerName="registry-server" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.496754 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" containerName="registry-server" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.497067 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="114fb0c3-73de-486f-8de8-01ee44f56547" containerName="registry-server" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.498827 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.526235 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rl6wh"] Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.636482 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-utilities\") pod \"certified-operators-rl6wh\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.636579 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-catalog-content\") pod \"certified-operators-rl6wh\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.636633 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbnlb\" (UniqueName: \"kubernetes.io/projected/1924ca56-41bb-4baf-80e0-880785417fcd-kube-api-access-gbnlb\") pod \"certified-operators-rl6wh\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.739221 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-utilities\") pod \"certified-operators-rl6wh\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc 
kubenswrapper[5133]: I1121 15:32:21.739372 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-catalog-content\") pod \"certified-operators-rl6wh\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.739446 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbnlb\" (UniqueName: \"kubernetes.io/projected/1924ca56-41bb-4baf-80e0-880785417fcd-kube-api-access-gbnlb\") pod \"certified-operators-rl6wh\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.739748 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-utilities\") pod \"certified-operators-rl6wh\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.739834 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-catalog-content\") pod \"certified-operators-rl6wh\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.778755 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbnlb\" (UniqueName: \"kubernetes.io/projected/1924ca56-41bb-4baf-80e0-880785417fcd-kube-api-access-gbnlb\") pod \"certified-operators-rl6wh\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:21 crc kubenswrapper[5133]: I1121 15:32:21.822460 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:22 crc kubenswrapper[5133]: I1121 15:32:22.331391 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rl6wh"] Nov 21 15:32:22 crc kubenswrapper[5133]: I1121 15:32:22.641460 5133 generic.go:334] "Generic (PLEG): container finished" podID="1924ca56-41bb-4baf-80e0-880785417fcd" containerID="60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398" exitCode=0 Nov 21 15:32:22 crc kubenswrapper[5133]: I1121 15:32:22.641550 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rl6wh" event={"ID":"1924ca56-41bb-4baf-80e0-880785417fcd","Type":"ContainerDied","Data":"60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398"} Nov 21 15:32:22 crc kubenswrapper[5133]: I1121 15:32:22.641739 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rl6wh" event={"ID":"1924ca56-41bb-4baf-80e0-880785417fcd","Type":"ContainerStarted","Data":"23f3d5900482dde8f8325dd16a6a830ae7a7bd24dc9abcf3d55c028a6b897ad1"} Nov 21 15:32:23 crc kubenswrapper[5133]: I1121 15:32:23.654645 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rl6wh" event={"ID":"1924ca56-41bb-4baf-80e0-880785417fcd","Type":"ContainerStarted","Data":"3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0"} Nov 21 15:32:24 crc kubenswrapper[5133]: I1121 15:32:24.668146 5133 generic.go:334] "Generic (PLEG): container finished" podID="1924ca56-41bb-4baf-80e0-880785417fcd" containerID="3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0" exitCode=0 Nov 21 15:32:24 crc kubenswrapper[5133]: I1121 15:32:24.668280 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rl6wh" event={"ID":"1924ca56-41bb-4baf-80e0-880785417fcd","Type":"ContainerDied","Data":"3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0"} Nov 21 15:32:25 crc kubenswrapper[5133]: I1121 15:32:25.681119 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rl6wh" event={"ID":"1924ca56-41bb-4baf-80e0-880785417fcd","Type":"ContainerStarted","Data":"4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65"} Nov 21 15:32:25 crc kubenswrapper[5133]: I1121 15:32:25.704663 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rl6wh" podStartSLOduration=2.2832936200000002 podStartE2EDuration="4.704633271s" podCreationTimestamp="2025-11-21 15:32:21 +0000 UTC" firstStartedPulling="2025-11-21 15:32:22.643575862 +0000 UTC m=+6602.441408110" lastFinishedPulling="2025-11-21 15:32:25.064915513 +0000 UTC m=+6604.862747761" observedRunningTime="2025-11-21 15:32:25.699399113 +0000 UTC m=+6605.497231371" watchObservedRunningTime="2025-11-21 15:32:25.704633271 +0000 UTC m=+6605.502465529" Nov 21 15:32:29 crc kubenswrapper[5133]: I1121 15:32:29.458172 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:32:29 crc kubenswrapper[5133]: E1121 15:32:29.458958 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:32:31 crc kubenswrapper[5133]: I1121 15:32:31.823114 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:31 crc kubenswrapper[5133]: I1121 15:32:31.823952 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:31 crc kubenswrapper[5133]: I1121 15:32:31.891052 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:32 crc kubenswrapper[5133]: I1121 15:32:32.808991 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:32 crc kubenswrapper[5133]: I1121 15:32:32.854826 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rl6wh"] Nov 21 15:32:34 crc kubenswrapper[5133]: I1121 15:32:34.771187 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rl6wh" podUID="1924ca56-41bb-4baf-80e0-880785417fcd" containerName="registry-server" containerID="cri-o://4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65" gracePeriod=2 Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.179975 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.301730 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-catalog-content\") pod \"1924ca56-41bb-4baf-80e0-880785417fcd\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.301851 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-utilities\") pod \"1924ca56-41bb-4baf-80e0-880785417fcd\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.302747 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-utilities" (OuterVolumeSpecName: "utilities") pod "1924ca56-41bb-4baf-80e0-880785417fcd" (UID: "1924ca56-41bb-4baf-80e0-880785417fcd"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.302851 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbnlb\" (UniqueName: \"kubernetes.io/projected/1924ca56-41bb-4baf-80e0-880785417fcd-kube-api-access-gbnlb\") pod \"1924ca56-41bb-4baf-80e0-880785417fcd\" (UID: \"1924ca56-41bb-4baf-80e0-880785417fcd\") " Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.303383 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.308220 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1924ca56-41bb-4baf-80e0-880785417fcd-kube-api-access-gbnlb" (OuterVolumeSpecName: "kube-api-access-gbnlb") pod "1924ca56-41bb-4baf-80e0-880785417fcd" (UID: "1924ca56-41bb-4baf-80e0-880785417fcd"). InnerVolumeSpecName "kube-api-access-gbnlb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.348026 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1924ca56-41bb-4baf-80e0-880785417fcd" (UID: "1924ca56-41bb-4baf-80e0-880785417fcd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.405393 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1924ca56-41bb-4baf-80e0-880785417fcd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.405431 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbnlb\" (UniqueName: \"kubernetes.io/projected/1924ca56-41bb-4baf-80e0-880785417fcd-kube-api-access-gbnlb\") on node \"crc\" DevicePath \"\"" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.782807 5133 generic.go:334] "Generic (PLEG): container finished" podID="1924ca56-41bb-4baf-80e0-880785417fcd" containerID="4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65" exitCode=0 Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.782857 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rl6wh" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.782871 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rl6wh" event={"ID":"1924ca56-41bb-4baf-80e0-880785417fcd","Type":"ContainerDied","Data":"4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65"} Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.783187 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rl6wh" event={"ID":"1924ca56-41bb-4baf-80e0-880785417fcd","Type":"ContainerDied","Data":"23f3d5900482dde8f8325dd16a6a830ae7a7bd24dc9abcf3d55c028a6b897ad1"} Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.783206 5133 scope.go:117] "RemoveContainer" containerID="4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.804342 5133 scope.go:117] "RemoveContainer" containerID="3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.820912 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rl6wh"] Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.833372 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rl6wh"] Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.846841 5133 scope.go:117] "RemoveContainer" containerID="60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.875508 5133 scope.go:117] "RemoveContainer" containerID="4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65" Nov 21 15:32:35 crc kubenswrapper[5133]: E1121 15:32:35.876098 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65\": container with ID starting with 4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65 not found: ID does not exist" containerID="4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.876138 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65"} err="failed to get container status \"4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65\": rpc error: code = NotFound desc = could not find container \"4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65\": container with ID starting with 4ab362cdf9a455af2df417ed41f3a96b8759d655160e01760b87ca60ce5abd65 not found: ID does not exist" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.876164 5133 scope.go:117] "RemoveContainer" containerID="3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0" Nov 21 15:32:35 crc kubenswrapper[5133]: E1121 15:32:35.876500 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0\": container with ID starting with 3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0 not found: ID does not exist" containerID="3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.876545 5133 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0"} err="failed to get container status \"3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0\": rpc error: code = NotFound desc = could not find container \"3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0\": container with ID starting with 3f81b1e2f7fa5a038cec795fd777ef6f3a37a2ab1d04b43ba565c7274bb95ec0 not found: ID does not exist" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.876562 5133 scope.go:117] "RemoveContainer" containerID="60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398" Nov 21 15:32:35 crc kubenswrapper[5133]: E1121 15:32:35.876831 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398\": container with ID starting with 60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398 not found: ID does not exist" containerID="60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398" Nov 21 15:32:35 crc kubenswrapper[5133]: I1121 15:32:35.876860 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398"} err="failed to get container status \"60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398\": rpc error: code = NotFound desc = could not find container \"60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398\": container with ID starting with 60c0cc62798f41ee311fe6df97b812648029e620f5dda5b408ef42ab320d5398 not found: ID does not exist" Nov 21 15:32:36 crc kubenswrapper[5133]: I1121 15:32:36.472802 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1924ca56-41bb-4baf-80e0-880785417fcd" path="/var/lib/kubelet/pods/1924ca56-41bb-4baf-80e0-880785417fcd/volumes" Nov 21 15:32:44 crc kubenswrapper[5133]: I1121 15:32:44.458877 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:32:44 crc kubenswrapper[5133]: E1121 15:32:44.460197 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:32:55 crc kubenswrapper[5133]: I1121 15:32:55.458854 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:32:55 crc kubenswrapper[5133]: E1121 15:32:55.460112 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:33:10 crc kubenswrapper[5133]: I1121 15:33:10.458146 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:33:10 crc 
kubenswrapper[5133]: E1121 15:33:10.458941 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:33:24 crc kubenswrapper[5133]: I1121 15:33:24.458047 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:33:24 crc kubenswrapper[5133]: E1121 15:33:24.459226 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:33:36 crc kubenswrapper[5133]: I1121 15:33:36.457697 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:33:36 crc kubenswrapper[5133]: E1121 15:33:36.459270 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:33:50 crc kubenswrapper[5133]: I1121 15:33:50.457398 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:33:50 crc kubenswrapper[5133]: E1121 15:33:50.458135 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:34:03 crc kubenswrapper[5133]: I1121 15:34:03.457556 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:34:03 crc kubenswrapper[5133]: E1121 15:34:03.458347 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:34:18 crc kubenswrapper[5133]: I1121 15:34:18.458424 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:34:18 crc kubenswrapper[5133]: E1121 15:34:18.459333 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:34:32 crc kubenswrapper[5133]: I1121 15:34:32.463913 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:34:32 crc kubenswrapper[5133]: E1121 15:34:32.465626 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:34:47 crc kubenswrapper[5133]: I1121 15:34:47.458414 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:34:47 crc kubenswrapper[5133]: E1121 15:34:47.459330 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:35:01 crc kubenswrapper[5133]: I1121 15:35:01.457885 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:35:01 crc kubenswrapper[5133]: E1121 15:35:01.458776 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:35:13 crc kubenswrapper[5133]: I1121 15:35:13.458044 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:35:13 crc kubenswrapper[5133]: E1121 15:35:13.458766 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.232851 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-x7624"] Nov 21 15:35:22 crc kubenswrapper[5133]: E1121 15:35:22.237765 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1924ca56-41bb-4baf-80e0-880785417fcd" containerName="extract-utilities" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.237798 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1924ca56-41bb-4baf-80e0-880785417fcd" containerName="extract-utilities" Nov 21 
15:35:22 crc kubenswrapper[5133]: E1121 15:35:22.237825 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1924ca56-41bb-4baf-80e0-880785417fcd" containerName="extract-content" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.237834 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1924ca56-41bb-4baf-80e0-880785417fcd" containerName="extract-content" Nov 21 15:35:22 crc kubenswrapper[5133]: E1121 15:35:22.237873 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1924ca56-41bb-4baf-80e0-880785417fcd" containerName="registry-server" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.237883 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="1924ca56-41bb-4baf-80e0-880785417fcd" containerName="registry-server" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.238150 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="1924ca56-41bb-4baf-80e0-880785417fcd" containerName="registry-server" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.240023 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.240716 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7624"] Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.281078 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-catalog-content\") pod \"redhat-marketplace-x7624\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.281255 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klhkm\" (UniqueName: \"kubernetes.io/projected/b27e41df-e38c-4789-81d3-be32c0bcee28-kube-api-access-klhkm\") pod \"redhat-marketplace-x7624\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.281397 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-utilities\") pod \"redhat-marketplace-x7624\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.383520 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-catalog-content\") pod \"redhat-marketplace-x7624\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.383681 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klhkm\" (UniqueName: \"kubernetes.io/projected/b27e41df-e38c-4789-81d3-be32c0bcee28-kube-api-access-klhkm\") pod \"redhat-marketplace-x7624\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.383779 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-utilities\") pod \"redhat-marketplace-x7624\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.384252 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-utilities\") pod \"redhat-marketplace-x7624\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.385357 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-catalog-content\") pod \"redhat-marketplace-x7624\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.411582 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klhkm\" (UniqueName: \"kubernetes.io/projected/b27e41df-e38c-4789-81d3-be32c0bcee28-kube-api-access-klhkm\") pod \"redhat-marketplace-x7624\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:22 crc kubenswrapper[5133]: I1121 15:35:22.568754 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:23 crc kubenswrapper[5133]: I1121 15:35:23.028365 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7624"] Nov 21 15:35:23 crc kubenswrapper[5133]: I1121 15:35:23.431136 5133 generic.go:334] "Generic (PLEG): container finished" podID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerID="fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c" exitCode=0 Nov 21 15:35:23 crc kubenswrapper[5133]: I1121 15:35:23.431204 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7624" event={"ID":"b27e41df-e38c-4789-81d3-be32c0bcee28","Type":"ContainerDied","Data":"fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c"} Nov 21 15:35:23 crc kubenswrapper[5133]: I1121 15:35:23.431511 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7624" event={"ID":"b27e41df-e38c-4789-81d3-be32c0bcee28","Type":"ContainerStarted","Data":"ad135e856097f04c49e17ef334bbb7a96a8e1bd41310b23c91cb50e91e71c1ac"} Nov 21 15:35:23 crc kubenswrapper[5133]: I1121 15:35:23.433345 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:35:25 crc kubenswrapper[5133]: I1121 15:35:25.449846 5133 generic.go:334] "Generic (PLEG): container finished" podID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerID="2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793" exitCode=0 Nov 21 15:35:25 crc kubenswrapper[5133]: I1121 15:35:25.449894 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7624" event={"ID":"b27e41df-e38c-4789-81d3-be32c0bcee28","Type":"ContainerDied","Data":"2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793"} Nov 21 15:35:26 crc kubenswrapper[5133]: I1121 15:35:26.458442 5133 scope.go:117] "RemoveContainer" 
containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:35:26 crc kubenswrapper[5133]: E1121 15:35:26.459872 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:35:27 crc kubenswrapper[5133]: I1121 15:35:27.471935 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7624" event={"ID":"b27e41df-e38c-4789-81d3-be32c0bcee28","Type":"ContainerStarted","Data":"affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5"} Nov 21 15:35:27 crc kubenswrapper[5133]: I1121 15:35:27.495304 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-x7624" podStartSLOduration=2.374798509 podStartE2EDuration="5.495283535s" podCreationTimestamp="2025-11-21 15:35:22 +0000 UTC" firstStartedPulling="2025-11-21 15:35:23.433076897 +0000 UTC m=+6783.230909155" lastFinishedPulling="2025-11-21 15:35:26.553561923 +0000 UTC m=+6786.351394181" observedRunningTime="2025-11-21 15:35:27.491761161 +0000 UTC m=+6787.289593409" watchObservedRunningTime="2025-11-21 15:35:27.495283535 +0000 UTC m=+6787.293115783" Nov 21 15:35:32 crc kubenswrapper[5133]: I1121 15:35:32.569144 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:32 crc kubenswrapper[5133]: I1121 15:35:32.569746 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:32 crc kubenswrapper[5133]: I1121 15:35:32.634383 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:33 crc kubenswrapper[5133]: I1121 15:35:33.611615 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:33 crc kubenswrapper[5133]: I1121 15:35:33.677064 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7624"] Nov 21 15:35:35 crc kubenswrapper[5133]: I1121 15:35:35.564086 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-x7624" podUID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerName="registry-server" containerID="cri-o://affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5" gracePeriod=2 Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.076738 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.185870 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-catalog-content\") pod \"b27e41df-e38c-4789-81d3-be32c0bcee28\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.186279 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klhkm\" (UniqueName: \"kubernetes.io/projected/b27e41df-e38c-4789-81d3-be32c0bcee28-kube-api-access-klhkm\") pod \"b27e41df-e38c-4789-81d3-be32c0bcee28\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.186416 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-utilities\") pod \"b27e41df-e38c-4789-81d3-be32c0bcee28\" (UID: \"b27e41df-e38c-4789-81d3-be32c0bcee28\") " Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.187707 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-utilities" (OuterVolumeSpecName: "utilities") pod "b27e41df-e38c-4789-81d3-be32c0bcee28" (UID: "b27e41df-e38c-4789-81d3-be32c0bcee28"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.193296 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b27e41df-e38c-4789-81d3-be32c0bcee28-kube-api-access-klhkm" (OuterVolumeSpecName: "kube-api-access-klhkm") pod "b27e41df-e38c-4789-81d3-be32c0bcee28" (UID: "b27e41df-e38c-4789-81d3-be32c0bcee28"). InnerVolumeSpecName "kube-api-access-klhkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.211082 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b27e41df-e38c-4789-81d3-be32c0bcee28" (UID: "b27e41df-e38c-4789-81d3-be32c0bcee28"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.289587 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.289633 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klhkm\" (UniqueName: \"kubernetes.io/projected/b27e41df-e38c-4789-81d3-be32c0bcee28-kube-api-access-klhkm\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.289643 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b27e41df-e38c-4789-81d3-be32c0bcee28-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.573840 5133 generic.go:334] "Generic (PLEG): container finished" podID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerID="affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5" exitCode=0 Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.573897 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7624" event={"ID":"b27e41df-e38c-4789-81d3-be32c0bcee28","Type":"ContainerDied","Data":"affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5"} Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.573947 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x7624" event={"ID":"b27e41df-e38c-4789-81d3-be32c0bcee28","Type":"ContainerDied","Data":"ad135e856097f04c49e17ef334bbb7a96a8e1bd41310b23c91cb50e91e71c1ac"} Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.573961 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x7624" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.573974 5133 scope.go:117] "RemoveContainer" containerID="affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.598867 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7624"] Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.602439 5133 scope.go:117] "RemoveContainer" containerID="2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.610777 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-x7624"] Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.620984 5133 scope.go:117] "RemoveContainer" containerID="fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.673274 5133 scope.go:117] "RemoveContainer" containerID="affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5" Nov 21 15:35:36 crc kubenswrapper[5133]: E1121 15:35:36.673750 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5\": container with ID starting with affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5 not found: ID does not exist" containerID="affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.673793 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5"} err="failed to get container status \"affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5\": rpc error: code = NotFound desc = could not find container \"affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5\": container with ID starting with affcf978fc1359333c690b6e1cb6efd99be870082f9fcf8b027a47be1dba39e5 not found: ID does not exist" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.673822 5133 scope.go:117] "RemoveContainer" containerID="2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793" Nov 21 15:35:36 crc kubenswrapper[5133]: E1121 15:35:36.674226 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793\": container with ID starting with 2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793 not found: ID does not exist" containerID="2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.674272 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793"} err="failed to get container status \"2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793\": rpc error: code = NotFound desc = could not find container \"2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793\": container with ID starting with 2f859663f3a609d62af2c66bfd59c74ba405cff9eacdc6c0a918b94235470793 not found: ID does not exist" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.674301 5133 scope.go:117] "RemoveContainer" 
containerID="fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c" Nov 21 15:35:36 crc kubenswrapper[5133]: E1121 15:35:36.674543 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c\": container with ID starting with fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c not found: ID does not exist" containerID="fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c" Nov 21 15:35:36 crc kubenswrapper[5133]: I1121 15:35:36.674563 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c"} err="failed to get container status \"fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c\": rpc error: code = NotFound desc = could not find container \"fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c\": container with ID starting with fd0eb628fccf782e81c6f4c7c9e6dfa938a842d066fda238d3eb5a4793702d9c not found: ID does not exist" Nov 21 15:35:38 crc kubenswrapper[5133]: I1121 15:35:38.458178 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:35:38 crc kubenswrapper[5133]: E1121 15:35:38.458868 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:35:38 crc kubenswrapper[5133]: I1121 15:35:38.484972 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b27e41df-e38c-4789-81d3-be32c0bcee28" path="/var/lib/kubelet/pods/b27e41df-e38c-4789-81d3-be32c0bcee28/volumes" Nov 21 15:35:50 crc kubenswrapper[5133]: I1121 15:35:50.460758 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:35:50 crc kubenswrapper[5133]: E1121 15:35:50.462162 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:36:02 crc kubenswrapper[5133]: I1121 15:36:02.457529 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:36:02 crc kubenswrapper[5133]: E1121 15:36:02.458853 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:36:17 crc kubenswrapper[5133]: I1121 15:36:17.458973 5133 scope.go:117] "RemoveContainer" 
containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:36:17 crc kubenswrapper[5133]: E1121 15:36:17.459857 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:36:28 crc kubenswrapper[5133]: I1121 15:36:28.458731 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:36:29 crc kubenswrapper[5133]: I1121 15:36:29.099344 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"687490ee2e06821d0dabcc541391ccb5223aacb17e1dc37e0067e4b8571cb9fa"} Nov 21 15:38:07 crc kubenswrapper[5133]: I1121 15:38:07.108423 5133 generic.go:334] "Generic (PLEG): container finished" podID="9c76c434-18e5-410a-9b4d-1538e6434c05" containerID="904b1ea40cfda33e5bcd014460ab6e62cdc159e8a25a9ae37c8ec525d109c0ba" exitCode=1 Nov 21 15:38:07 crc kubenswrapper[5133]: I1121 15:38:07.108559 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"9c76c434-18e5-410a-9b4d-1538e6434c05","Type":"ContainerDied","Data":"904b1ea40cfda33e5bcd014460ab6e62cdc159e8a25a9ae37c8ec525d109c0ba"} Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.534335 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.594117 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"9c76c434-18e5-410a-9b4d-1538e6434c05\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.594531 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-temporary\") pod \"9c76c434-18e5-410a-9b4d-1538e6434c05\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.594659 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ca-certs\") pod \"9c76c434-18e5-410a-9b4d-1538e6434c05\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.594754 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config-secret\") pod \"9c76c434-18e5-410a-9b4d-1538e6434c05\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.594833 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config\") pod 
\"9c76c434-18e5-410a-9b4d-1538e6434c05\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.594889 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ssh-key\") pod \"9c76c434-18e5-410a-9b4d-1538e6434c05\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.594926 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-workdir\") pod \"9c76c434-18e5-410a-9b4d-1538e6434c05\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.594959 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2wnd\" (UniqueName: \"kubernetes.io/projected/9c76c434-18e5-410a-9b4d-1538e6434c05-kube-api-access-t2wnd\") pod \"9c76c434-18e5-410a-9b4d-1538e6434c05\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.594999 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-config-data\") pod \"9c76c434-18e5-410a-9b4d-1538e6434c05\" (UID: \"9c76c434-18e5-410a-9b4d-1538e6434c05\") " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.596520 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "9c76c434-18e5-410a-9b4d-1538e6434c05" (UID: "9c76c434-18e5-410a-9b4d-1538e6434c05"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.596975 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-config-data" (OuterVolumeSpecName: "config-data") pod "9c76c434-18e5-410a-9b4d-1538e6434c05" (UID: "9c76c434-18e5-410a-9b4d-1538e6434c05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.600161 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "test-operator-logs") pod "9c76c434-18e5-410a-9b4d-1538e6434c05" (UID: "9c76c434-18e5-410a-9b4d-1538e6434c05"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.605323 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "9c76c434-18e5-410a-9b4d-1538e6434c05" (UID: "9c76c434-18e5-410a-9b4d-1538e6434c05"). InnerVolumeSpecName "test-operator-ephemeral-workdir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.611140 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c76c434-18e5-410a-9b4d-1538e6434c05-kube-api-access-t2wnd" (OuterVolumeSpecName: "kube-api-access-t2wnd") pod "9c76c434-18e5-410a-9b4d-1538e6434c05" (UID: "9c76c434-18e5-410a-9b4d-1538e6434c05"). InnerVolumeSpecName "kube-api-access-t2wnd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.621942 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9c76c434-18e5-410a-9b4d-1538e6434c05" (UID: "9c76c434-18e5-410a-9b4d-1538e6434c05"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.629202 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "9c76c434-18e5-410a-9b4d-1538e6434c05" (UID: "9c76c434-18e5-410a-9b4d-1538e6434c05"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.629630 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "9c76c434-18e5-410a-9b4d-1538e6434c05" (UID: "9c76c434-18e5-410a-9b4d-1538e6434c05"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.649978 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "9c76c434-18e5-410a-9b4d-1538e6434c05" (UID: "9c76c434-18e5-410a-9b4d-1538e6434c05"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.697052 5133 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.697088 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2wnd\" (UniqueName: \"kubernetes.io/projected/9c76c434-18e5-410a-9b4d-1538e6434c05-kube-api-access-t2wnd\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.697098 5133 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-config-data\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.697123 5133 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.697132 5133 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/9c76c434-18e5-410a-9b4d-1538e6434c05-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.697142 5133 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.697152 5133 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.697160 5133 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/9c76c434-18e5-410a-9b4d-1538e6434c05-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.697169 5133 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c76c434-18e5-410a-9b4d-1538e6434c05-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.718473 5133 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 21 15:38:08 crc kubenswrapper[5133]: I1121 15:38:08.800090 5133 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 21 15:38:09 crc kubenswrapper[5133]: I1121 15:38:09.132873 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"9c76c434-18e5-410a-9b4d-1538e6434c05","Type":"ContainerDied","Data":"82f07dd6b12b7103444777f009c65abad0c89da8f337ce5185618120de9983f7"} Nov 21 15:38:09 crc kubenswrapper[5133]: I1121 15:38:09.132913 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82f07dd6b12b7103444777f009c65abad0c89da8f337ce5185618120de9983f7" Nov 21 15:38:09 crc 
kubenswrapper[5133]: I1121 15:38:09.133034 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.184211 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 21 15:38:12 crc kubenswrapper[5133]: E1121 15:38:12.186072 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerName="registry-server" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.186092 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerName="registry-server" Nov 21 15:38:12 crc kubenswrapper[5133]: E1121 15:38:12.186115 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c76c434-18e5-410a-9b4d-1538e6434c05" containerName="tempest-tests-tempest-tests-runner" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.186124 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c76c434-18e5-410a-9b4d-1538e6434c05" containerName="tempest-tests-tempest-tests-runner" Nov 21 15:38:12 crc kubenswrapper[5133]: E1121 15:38:12.186151 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerName="extract-content" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.186157 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerName="extract-content" Nov 21 15:38:12 crc kubenswrapper[5133]: E1121 15:38:12.186194 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerName="extract-utilities" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.186201 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerName="extract-utilities" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.186577 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b27e41df-e38c-4789-81d3-be32c0bcee28" containerName="registry-server" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.186597 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c76c434-18e5-410a-9b4d-1538e6434c05" containerName="tempest-tests-tempest-tests-runner" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.187690 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.190394 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-shdsl" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.197132 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.281408 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kd74s\" (UniqueName: \"kubernetes.io/projected/990c8e46-eae8-45b4-b847-dbaef496403e-kube-api-access-kd74s\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"990c8e46-eae8-45b4-b847-dbaef496403e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.281785 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"990c8e46-eae8-45b4-b847-dbaef496403e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.384233 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"990c8e46-eae8-45b4-b847-dbaef496403e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.384372 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kd74s\" (UniqueName: \"kubernetes.io/projected/990c8e46-eae8-45b4-b847-dbaef496403e-kube-api-access-kd74s\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"990c8e46-eae8-45b4-b847-dbaef496403e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.384702 5133 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"990c8e46-eae8-45b4-b847-dbaef496403e\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.403098 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kd74s\" (UniqueName: \"kubernetes.io/projected/990c8e46-eae8-45b4-b847-dbaef496403e-kube-api-access-kd74s\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"990c8e46-eae8-45b4-b847-dbaef496403e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.410136 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"990c8e46-eae8-45b4-b847-dbaef496403e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 15:38:12 crc 
kubenswrapper[5133]: I1121 15:38:12.513156 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 21 15:38:12 crc kubenswrapper[5133]: I1121 15:38:12.966773 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 21 15:38:13 crc kubenswrapper[5133]: I1121 15:38:13.170051 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"990c8e46-eae8-45b4-b847-dbaef496403e","Type":"ContainerStarted","Data":"ac5155e63eacafd95913158c04caded253387cdbcf9185d08cf890346fabc165"} Nov 21 15:38:15 crc kubenswrapper[5133]: I1121 15:38:15.190240 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"990c8e46-eae8-45b4-b847-dbaef496403e","Type":"ContainerStarted","Data":"d8283ce8503d3d33a999a1c7245f69066d83041625e9835487f61b63e9688998"} Nov 21 15:38:15 crc kubenswrapper[5133]: I1121 15:38:15.208150 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.176876967 podStartE2EDuration="3.208126785s" podCreationTimestamp="2025-11-21 15:38:12 +0000 UTC" firstStartedPulling="2025-11-21 15:38:12.978096118 +0000 UTC m=+6952.775928366" lastFinishedPulling="2025-11-21 15:38:14.009345936 +0000 UTC m=+6953.807178184" observedRunningTime="2025-11-21 15:38:15.204344674 +0000 UTC m=+6955.002176992" watchObservedRunningTime="2025-11-21 15:38:15.208126785 +0000 UTC m=+6955.005959043" Nov 21 15:38:53 crc kubenswrapper[5133]: I1121 15:38:53.311121 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:38:53 crc kubenswrapper[5133]: I1121 15:38:53.311912 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:38:56 crc kubenswrapper[5133]: I1121 15:38:56.913117 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fft98/must-gather-c4tfn"] Nov 21 15:38:56 crc kubenswrapper[5133]: I1121 15:38:56.915629 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:38:56 crc kubenswrapper[5133]: I1121 15:38:56.917466 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-fft98"/"default-dockercfg-2fggn" Nov 21 15:38:56 crc kubenswrapper[5133]: I1121 15:38:56.917603 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-fft98"/"kube-root-ca.crt" Nov 21 15:38:56 crc kubenswrapper[5133]: I1121 15:38:56.917711 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-fft98"/"openshift-service-ca.crt" Nov 21 15:38:56 crc kubenswrapper[5133]: I1121 15:38:56.932380 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-fft98/must-gather-c4tfn"] Nov 21 15:38:57 crc kubenswrapper[5133]: I1121 15:38:57.085032 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bfh6\" (UniqueName: \"kubernetes.io/projected/b2f02843-c088-41b6-832e-90acb53319aa-kube-api-access-2bfh6\") pod \"must-gather-c4tfn\" (UID: \"b2f02843-c088-41b6-832e-90acb53319aa\") " pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:38:57 crc kubenswrapper[5133]: I1121 15:38:57.085157 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b2f02843-c088-41b6-832e-90acb53319aa-must-gather-output\") pod \"must-gather-c4tfn\" (UID: \"b2f02843-c088-41b6-832e-90acb53319aa\") " pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:38:57 crc kubenswrapper[5133]: I1121 15:38:57.186842 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bfh6\" (UniqueName: \"kubernetes.io/projected/b2f02843-c088-41b6-832e-90acb53319aa-kube-api-access-2bfh6\") pod \"must-gather-c4tfn\" (UID: \"b2f02843-c088-41b6-832e-90acb53319aa\") " pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:38:57 crc kubenswrapper[5133]: I1121 15:38:57.187040 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b2f02843-c088-41b6-832e-90acb53319aa-must-gather-output\") pod \"must-gather-c4tfn\" (UID: \"b2f02843-c088-41b6-832e-90acb53319aa\") " pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:38:57 crc kubenswrapper[5133]: I1121 15:38:57.187519 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b2f02843-c088-41b6-832e-90acb53319aa-must-gather-output\") pod \"must-gather-c4tfn\" (UID: \"b2f02843-c088-41b6-832e-90acb53319aa\") " pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:38:57 crc kubenswrapper[5133]: I1121 15:38:57.207249 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bfh6\" (UniqueName: \"kubernetes.io/projected/b2f02843-c088-41b6-832e-90acb53319aa-kube-api-access-2bfh6\") pod \"must-gather-c4tfn\" (UID: \"b2f02843-c088-41b6-832e-90acb53319aa\") " pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:38:57 crc kubenswrapper[5133]: I1121 15:38:57.246584 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:38:57 crc kubenswrapper[5133]: I1121 15:38:57.762927 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-fft98/must-gather-c4tfn"] Nov 21 15:38:58 crc kubenswrapper[5133]: I1121 15:38:58.614892 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/must-gather-c4tfn" event={"ID":"b2f02843-c088-41b6-832e-90acb53319aa","Type":"ContainerStarted","Data":"ea316f643c10122e9764c6ec0b9ac8e9adff57e78d4a26258f6bc1ea511ed037"} Nov 21 15:39:03 crc kubenswrapper[5133]: I1121 15:39:03.663749 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/must-gather-c4tfn" event={"ID":"b2f02843-c088-41b6-832e-90acb53319aa","Type":"ContainerStarted","Data":"053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4"} Nov 21 15:39:04 crc kubenswrapper[5133]: I1121 15:39:04.672101 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/must-gather-c4tfn" event={"ID":"b2f02843-c088-41b6-832e-90acb53319aa","Type":"ContainerStarted","Data":"cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6"} Nov 21 15:39:04 crc kubenswrapper[5133]: I1121 15:39:04.697660 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-fft98/must-gather-c4tfn" podStartSLOduration=3.199932652 podStartE2EDuration="8.697628834s" podCreationTimestamp="2025-11-21 15:38:56 +0000 UTC" firstStartedPulling="2025-11-21 15:38:57.772682706 +0000 UTC m=+6997.570514954" lastFinishedPulling="2025-11-21 15:39:03.270378888 +0000 UTC m=+7003.068211136" observedRunningTime="2025-11-21 15:39:04.68997595 +0000 UTC m=+7004.487808218" watchObservedRunningTime="2025-11-21 15:39:04.697628834 +0000 UTC m=+7004.495461112" Nov 21 15:39:07 crc kubenswrapper[5133]: E1121 15:39:07.194620 5133 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.179:49586->38.102.83.179:33373: write tcp 38.102.83.179:49586->38.102.83.179:33373: write: broken pipe Nov 21 15:39:08 crc kubenswrapper[5133]: I1121 15:39:08.569025 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fft98/crc-debug-qbkm9"] Nov 21 15:39:08 crc kubenswrapper[5133]: I1121 15:39:08.572209 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:39:08 crc kubenswrapper[5133]: I1121 15:39:08.757190 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f22581-2b07-423e-bf90-08d81181eb72-host\") pod \"crc-debug-qbkm9\" (UID: \"02f22581-2b07-423e-bf90-08d81181eb72\") " pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:39:08 crc kubenswrapper[5133]: I1121 15:39:08.757313 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v7kk\" (UniqueName: \"kubernetes.io/projected/02f22581-2b07-423e-bf90-08d81181eb72-kube-api-access-8v7kk\") pod \"crc-debug-qbkm9\" (UID: \"02f22581-2b07-423e-bf90-08d81181eb72\") " pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:39:08 crc kubenswrapper[5133]: I1121 15:39:08.859216 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v7kk\" (UniqueName: \"kubernetes.io/projected/02f22581-2b07-423e-bf90-08d81181eb72-kube-api-access-8v7kk\") pod \"crc-debug-qbkm9\" (UID: \"02f22581-2b07-423e-bf90-08d81181eb72\") " pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:39:08 crc kubenswrapper[5133]: I1121 15:39:08.859340 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f22581-2b07-423e-bf90-08d81181eb72-host\") pod \"crc-debug-qbkm9\" (UID: \"02f22581-2b07-423e-bf90-08d81181eb72\") " pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:39:08 crc kubenswrapper[5133]: I1121 15:39:08.859546 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f22581-2b07-423e-bf90-08d81181eb72-host\") pod \"crc-debug-qbkm9\" (UID: \"02f22581-2b07-423e-bf90-08d81181eb72\") " pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:39:08 crc kubenswrapper[5133]: I1121 15:39:08.878622 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v7kk\" (UniqueName: \"kubernetes.io/projected/02f22581-2b07-423e-bf90-08d81181eb72-kube-api-access-8v7kk\") pod \"crc-debug-qbkm9\" (UID: \"02f22581-2b07-423e-bf90-08d81181eb72\") " pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:39:08 crc kubenswrapper[5133]: I1121 15:39:08.887575 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:39:08 crc kubenswrapper[5133]: W1121 15:39:08.965676 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02f22581_2b07_423e_bf90_08d81181eb72.slice/crio-8205efca0213a705b145c5279d1384ec44cd5cb5baae3f7199197df147024e80 WatchSource:0}: Error finding container 8205efca0213a705b145c5279d1384ec44cd5cb5baae3f7199197df147024e80: Status 404 returned error can't find the container with id 8205efca0213a705b145c5279d1384ec44cd5cb5baae3f7199197df147024e80 Nov 21 15:39:09 crc kubenswrapper[5133]: I1121 15:39:09.758658 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/crc-debug-qbkm9" event={"ID":"02f22581-2b07-423e-bf90-08d81181eb72","Type":"ContainerStarted","Data":"8205efca0213a705b145c5279d1384ec44cd5cb5baae3f7199197df147024e80"} Nov 21 15:39:19 crc kubenswrapper[5133]: I1121 15:39:19.850655 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/crc-debug-qbkm9" event={"ID":"02f22581-2b07-423e-bf90-08d81181eb72","Type":"ContainerStarted","Data":"29eadaabc384b5aed5fcb68407e1fefbbb708de17168627976517f9c2fe0983e"} Nov 21 15:39:19 crc kubenswrapper[5133]: I1121 15:39:19.872770 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-fft98/crc-debug-qbkm9" podStartSLOduration=2.013535693 podStartE2EDuration="11.872735136s" podCreationTimestamp="2025-11-21 15:39:08 +0000 UTC" firstStartedPulling="2025-11-21 15:39:08.970997665 +0000 UTC m=+7008.768829913" lastFinishedPulling="2025-11-21 15:39:18.830197108 +0000 UTC m=+7018.628029356" observedRunningTime="2025-11-21 15:39:19.86612963 +0000 UTC m=+7019.663961888" watchObservedRunningTime="2025-11-21 15:39:19.872735136 +0000 UTC m=+7019.670567374" Nov 21 15:39:23 crc kubenswrapper[5133]: I1121 15:39:23.310681 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:39:23 crc kubenswrapper[5133]: I1121 15:39:23.311212 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:39:53 crc kubenswrapper[5133]: I1121 15:39:53.310794 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:39:53 crc kubenswrapper[5133]: I1121 15:39:53.311365 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:39:53 crc kubenswrapper[5133]: I1121 15:39:53.311410 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 15:39:53 crc kubenswrapper[5133]: I1121 15:39:53.312159 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"687490ee2e06821d0dabcc541391ccb5223aacb17e1dc37e0067e4b8571cb9fa"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:39:53 crc kubenswrapper[5133]: I1121 15:39:53.312219 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://687490ee2e06821d0dabcc541391ccb5223aacb17e1dc37e0067e4b8571cb9fa" gracePeriod=600 Nov 21 15:39:54 crc kubenswrapper[5133]: I1121 15:39:54.159654 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="687490ee2e06821d0dabcc541391ccb5223aacb17e1dc37e0067e4b8571cb9fa" exitCode=0 Nov 21 15:39:54 crc kubenswrapper[5133]: I1121 15:39:54.159720 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"687490ee2e06821d0dabcc541391ccb5223aacb17e1dc37e0067e4b8571cb9fa"} Nov 21 15:39:54 crc kubenswrapper[5133]: I1121 15:39:54.160318 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd"} Nov 21 15:39:54 crc kubenswrapper[5133]: I1121 15:39:54.160362 5133 scope.go:117] "RemoveContainer" containerID="b88d3b7b176333c05c13a9dc77345d383250a38ab5512a29acb877d8889770b9" Nov 21 15:40:09 crc kubenswrapper[5133]: I1121 15:40:09.326386 5133 generic.go:334] "Generic (PLEG): container finished" podID="02f22581-2b07-423e-bf90-08d81181eb72" containerID="29eadaabc384b5aed5fcb68407e1fefbbb708de17168627976517f9c2fe0983e" exitCode=0 Nov 21 15:40:09 crc kubenswrapper[5133]: I1121 15:40:09.326586 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/crc-debug-qbkm9" event={"ID":"02f22581-2b07-423e-bf90-08d81181eb72","Type":"ContainerDied","Data":"29eadaabc384b5aed5fcb68407e1fefbbb708de17168627976517f9c2fe0983e"} Nov 21 15:40:10 crc kubenswrapper[5133]: I1121 15:40:10.488403 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:40:10 crc kubenswrapper[5133]: I1121 15:40:10.529643 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fft98/crc-debug-qbkm9"] Nov 21 15:40:10 crc kubenswrapper[5133]: I1121 15:40:10.543419 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fft98/crc-debug-qbkm9"] Nov 21 15:40:10 crc kubenswrapper[5133]: I1121 15:40:10.614437 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v7kk\" (UniqueName: \"kubernetes.io/projected/02f22581-2b07-423e-bf90-08d81181eb72-kube-api-access-8v7kk\") pod \"02f22581-2b07-423e-bf90-08d81181eb72\" (UID: \"02f22581-2b07-423e-bf90-08d81181eb72\") " Nov 21 15:40:10 crc kubenswrapper[5133]: I1121 15:40:10.614982 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f22581-2b07-423e-bf90-08d81181eb72-host\") pod \"02f22581-2b07-423e-bf90-08d81181eb72\" (UID: \"02f22581-2b07-423e-bf90-08d81181eb72\") " Nov 21 15:40:10 crc kubenswrapper[5133]: I1121 15:40:10.615171 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/02f22581-2b07-423e-bf90-08d81181eb72-host" (OuterVolumeSpecName: "host") pod "02f22581-2b07-423e-bf90-08d81181eb72" (UID: "02f22581-2b07-423e-bf90-08d81181eb72"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:40:10 crc kubenswrapper[5133]: I1121 15:40:10.615858 5133 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f22581-2b07-423e-bf90-08d81181eb72-host\") on node \"crc\" DevicePath \"\"" Nov 21 15:40:10 crc kubenswrapper[5133]: I1121 15:40:10.621174 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02f22581-2b07-423e-bf90-08d81181eb72-kube-api-access-8v7kk" (OuterVolumeSpecName: "kube-api-access-8v7kk") pod "02f22581-2b07-423e-bf90-08d81181eb72" (UID: "02f22581-2b07-423e-bf90-08d81181eb72"). InnerVolumeSpecName "kube-api-access-8v7kk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:40:10 crc kubenswrapper[5133]: I1121 15:40:10.718165 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v7kk\" (UniqueName: \"kubernetes.io/projected/02f22581-2b07-423e-bf90-08d81181eb72-kube-api-access-8v7kk\") on node \"crc\" DevicePath \"\"" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.355393 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8205efca0213a705b145c5279d1384ec44cd5cb5baae3f7199197df147024e80" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.355442 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-qbkm9" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.710598 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fft98/crc-debug-mg64f"] Nov 21 15:40:11 crc kubenswrapper[5133]: E1121 15:40:11.711049 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02f22581-2b07-423e-bf90-08d81181eb72" containerName="container-00" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.711067 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="02f22581-2b07-423e-bf90-08d81181eb72" containerName="container-00" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.711263 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="02f22581-2b07-423e-bf90-08d81181eb72" containerName="container-00" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.711957 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.737377 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5afb5a4-9968-4fbc-974f-a435617c8b83-host\") pod \"crc-debug-mg64f\" (UID: \"a5afb5a4-9968-4fbc-974f-a435617c8b83\") " pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.737783 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s68k7\" (UniqueName: \"kubernetes.io/projected/a5afb5a4-9968-4fbc-974f-a435617c8b83-kube-api-access-s68k7\") pod \"crc-debug-mg64f\" (UID: \"a5afb5a4-9968-4fbc-974f-a435617c8b83\") " pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.838752 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s68k7\" (UniqueName: \"kubernetes.io/projected/a5afb5a4-9968-4fbc-974f-a435617c8b83-kube-api-access-s68k7\") pod \"crc-debug-mg64f\" (UID: \"a5afb5a4-9968-4fbc-974f-a435617c8b83\") " pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.839227 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5afb5a4-9968-4fbc-974f-a435617c8b83-host\") pod \"crc-debug-mg64f\" (UID: \"a5afb5a4-9968-4fbc-974f-a435617c8b83\") " pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.839357 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5afb5a4-9968-4fbc-974f-a435617c8b83-host\") pod \"crc-debug-mg64f\" (UID: \"a5afb5a4-9968-4fbc-974f-a435617c8b83\") " pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:11 crc kubenswrapper[5133]: I1121 15:40:11.872848 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s68k7\" (UniqueName: \"kubernetes.io/projected/a5afb5a4-9968-4fbc-974f-a435617c8b83-kube-api-access-s68k7\") pod \"crc-debug-mg64f\" (UID: \"a5afb5a4-9968-4fbc-974f-a435617c8b83\") " pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:12 crc kubenswrapper[5133]: I1121 15:40:12.033673 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:12 crc kubenswrapper[5133]: W1121 15:40:12.075577 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5afb5a4_9968_4fbc_974f_a435617c8b83.slice/crio-ae83dac3b44264b37b7afc4cf849fe4d7e2e8385b2f2ee30ecb44da7bb8c3ae5 WatchSource:0}: Error finding container ae83dac3b44264b37b7afc4cf849fe4d7e2e8385b2f2ee30ecb44da7bb8c3ae5: Status 404 returned error can't find the container with id ae83dac3b44264b37b7afc4cf849fe4d7e2e8385b2f2ee30ecb44da7bb8c3ae5 Nov 21 15:40:12 crc kubenswrapper[5133]: I1121 15:40:12.366227 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/crc-debug-mg64f" event={"ID":"a5afb5a4-9968-4fbc-974f-a435617c8b83","Type":"ContainerStarted","Data":"e3544c0d89c702117beff1d3a42e9534916a377f07bf5219b324e7880f4466a9"} Nov 21 15:40:12 crc kubenswrapper[5133]: I1121 15:40:12.366539 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/crc-debug-mg64f" event={"ID":"a5afb5a4-9968-4fbc-974f-a435617c8b83","Type":"ContainerStarted","Data":"ae83dac3b44264b37b7afc4cf849fe4d7e2e8385b2f2ee30ecb44da7bb8c3ae5"} Nov 21 15:40:12 crc kubenswrapper[5133]: I1121 15:40:12.388339 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-fft98/crc-debug-mg64f" podStartSLOduration=1.388320915 podStartE2EDuration="1.388320915s" podCreationTimestamp="2025-11-21 15:40:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:40:12.378715059 +0000 UTC m=+7072.176547317" watchObservedRunningTime="2025-11-21 15:40:12.388320915 +0000 UTC m=+7072.186153183" Nov 21 15:40:12 crc kubenswrapper[5133]: I1121 15:40:12.472476 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02f22581-2b07-423e-bf90-08d81181eb72" path="/var/lib/kubelet/pods/02f22581-2b07-423e-bf90-08d81181eb72/volumes" Nov 21 15:40:13 crc kubenswrapper[5133]: I1121 15:40:13.379051 5133 generic.go:334] "Generic (PLEG): container finished" podID="a5afb5a4-9968-4fbc-974f-a435617c8b83" containerID="e3544c0d89c702117beff1d3a42e9534916a377f07bf5219b324e7880f4466a9" exitCode=0 Nov 21 15:40:13 crc kubenswrapper[5133]: I1121 15:40:13.379121 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/crc-debug-mg64f" event={"ID":"a5afb5a4-9968-4fbc-974f-a435617c8b83","Type":"ContainerDied","Data":"e3544c0d89c702117beff1d3a42e9534916a377f07bf5219b324e7880f4466a9"} Nov 21 15:40:14 crc kubenswrapper[5133]: I1121 15:40:14.488910 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:14 crc kubenswrapper[5133]: I1121 15:40:14.581399 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5afb5a4-9968-4fbc-974f-a435617c8b83-host\") pod \"a5afb5a4-9968-4fbc-974f-a435617c8b83\" (UID: \"a5afb5a4-9968-4fbc-974f-a435617c8b83\") " Nov 21 15:40:14 crc kubenswrapper[5133]: I1121 15:40:14.581554 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a5afb5a4-9968-4fbc-974f-a435617c8b83-host" (OuterVolumeSpecName: "host") pod "a5afb5a4-9968-4fbc-974f-a435617c8b83" (UID: "a5afb5a4-9968-4fbc-974f-a435617c8b83"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:40:14 crc kubenswrapper[5133]: I1121 15:40:14.581647 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s68k7\" (UniqueName: \"kubernetes.io/projected/a5afb5a4-9968-4fbc-974f-a435617c8b83-kube-api-access-s68k7\") pod \"a5afb5a4-9968-4fbc-974f-a435617c8b83\" (UID: \"a5afb5a4-9968-4fbc-974f-a435617c8b83\") " Nov 21 15:40:14 crc kubenswrapper[5133]: I1121 15:40:14.582229 5133 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a5afb5a4-9968-4fbc-974f-a435617c8b83-host\") on node \"crc\" DevicePath \"\"" Nov 21 15:40:14 crc kubenswrapper[5133]: I1121 15:40:14.588274 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5afb5a4-9968-4fbc-974f-a435617c8b83-kube-api-access-s68k7" (OuterVolumeSpecName: "kube-api-access-s68k7") pod "a5afb5a4-9968-4fbc-974f-a435617c8b83" (UID: "a5afb5a4-9968-4fbc-974f-a435617c8b83"). InnerVolumeSpecName "kube-api-access-s68k7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:40:14 crc kubenswrapper[5133]: I1121 15:40:14.683418 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s68k7\" (UniqueName: \"kubernetes.io/projected/a5afb5a4-9968-4fbc-974f-a435617c8b83-kube-api-access-s68k7\") on node \"crc\" DevicePath \"\"" Nov 21 15:40:15 crc kubenswrapper[5133]: I1121 15:40:15.238393 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fft98/crc-debug-mg64f"] Nov 21 15:40:15 crc kubenswrapper[5133]: I1121 15:40:15.245652 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fft98/crc-debug-mg64f"] Nov 21 15:40:15 crc kubenswrapper[5133]: I1121 15:40:15.406269 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ae83dac3b44264b37b7afc4cf849fe4d7e2e8385b2f2ee30ecb44da7bb8c3ae5" Nov 21 15:40:15 crc kubenswrapper[5133]: I1121 15:40:15.406348 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-mg64f" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.405336 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-fft98/crc-debug-fmtpp"] Nov 21 15:40:16 crc kubenswrapper[5133]: E1121 15:40:16.406183 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5afb5a4-9968-4fbc-974f-a435617c8b83" containerName="container-00" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.406200 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5afb5a4-9968-4fbc-974f-a435617c8b83" containerName="container-00" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.406432 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5afb5a4-9968-4fbc-974f-a435617c8b83" containerName="container-00" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.407202 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.469350 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5afb5a4-9968-4fbc-974f-a435617c8b83" path="/var/lib/kubelet/pods/a5afb5a4-9968-4fbc-974f-a435617c8b83/volumes" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.516403 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-host\") pod \"crc-debug-fmtpp\" (UID: \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\") " pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.516628 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzl5l\" (UniqueName: \"kubernetes.io/projected/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-kube-api-access-fzl5l\") pod \"crc-debug-fmtpp\" (UID: \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\") " pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.619126 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-host\") pod \"crc-debug-fmtpp\" (UID: \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\") " pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.619283 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzl5l\" (UniqueName: \"kubernetes.io/projected/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-kube-api-access-fzl5l\") pod \"crc-debug-fmtpp\" (UID: \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\") " pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.619391 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-host\") pod \"crc-debug-fmtpp\" (UID: \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\") " pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.649864 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzl5l\" (UniqueName: \"kubernetes.io/projected/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-kube-api-access-fzl5l\") pod \"crc-debug-fmtpp\" (UID: \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\") " pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:16 crc kubenswrapper[5133]: I1121 15:40:16.728116 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:16 crc kubenswrapper[5133]: W1121 15:40:16.765499 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68c857b0_c83a_4afa_8a74_0b5ea4b3caf3.slice/crio-ef50d2b4e05592f29eb469a301bb717ac8527438ae7a3e88d4a911ae7aa14dab WatchSource:0}: Error finding container ef50d2b4e05592f29eb469a301bb717ac8527438ae7a3e88d4a911ae7aa14dab: Status 404 returned error can't find the container with id ef50d2b4e05592f29eb469a301bb717ac8527438ae7a3e88d4a911ae7aa14dab Nov 21 15:40:17 crc kubenswrapper[5133]: I1121 15:40:17.425933 5133 generic.go:334] "Generic (PLEG): container finished" podID="68c857b0-c83a-4afa-8a74-0b5ea4b3caf3" containerID="dbe99f55791714ede4342b12dd673f9519a9af24c5facb334e8087529e93569a" exitCode=0 Nov 21 15:40:17 crc kubenswrapper[5133]: I1121 15:40:17.426026 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/crc-debug-fmtpp" event={"ID":"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3","Type":"ContainerDied","Data":"dbe99f55791714ede4342b12dd673f9519a9af24c5facb334e8087529e93569a"} Nov 21 15:40:17 crc kubenswrapper[5133]: I1121 15:40:17.426309 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/crc-debug-fmtpp" event={"ID":"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3","Type":"ContainerStarted","Data":"ef50d2b4e05592f29eb469a301bb717ac8527438ae7a3e88d4a911ae7aa14dab"} Nov 21 15:40:17 crc kubenswrapper[5133]: I1121 15:40:17.477526 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fft98/crc-debug-fmtpp"] Nov 21 15:40:17 crc kubenswrapper[5133]: I1121 15:40:17.483171 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fft98/crc-debug-fmtpp"] Nov 21 15:40:18 crc kubenswrapper[5133]: I1121 15:40:18.548774 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:18 crc kubenswrapper[5133]: I1121 15:40:18.661814 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-host\") pod \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\" (UID: \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\") " Nov 21 15:40:18 crc kubenswrapper[5133]: I1121 15:40:18.661910 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fzl5l\" (UniqueName: \"kubernetes.io/projected/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-kube-api-access-fzl5l\") pod \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\" (UID: \"68c857b0-c83a-4afa-8a74-0b5ea4b3caf3\") " Nov 21 15:40:18 crc kubenswrapper[5133]: I1121 15:40:18.662073 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-host" (OuterVolumeSpecName: "host") pod "68c857b0-c83a-4afa-8a74-0b5ea4b3caf3" (UID: "68c857b0-c83a-4afa-8a74-0b5ea4b3caf3"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 21 15:40:18 crc kubenswrapper[5133]: I1121 15:40:18.662523 5133 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-host\") on node \"crc\" DevicePath \"\"" Nov 21 15:40:18 crc kubenswrapper[5133]: I1121 15:40:18.667547 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-kube-api-access-fzl5l" (OuterVolumeSpecName: "kube-api-access-fzl5l") pod "68c857b0-c83a-4afa-8a74-0b5ea4b3caf3" (UID: "68c857b0-c83a-4afa-8a74-0b5ea4b3caf3"). InnerVolumeSpecName "kube-api-access-fzl5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:40:18 crc kubenswrapper[5133]: I1121 15:40:18.764919 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fzl5l\" (UniqueName: \"kubernetes.io/projected/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3-kube-api-access-fzl5l\") on node \"crc\" DevicePath \"\"" Nov 21 15:40:19 crc kubenswrapper[5133]: I1121 15:40:19.451034 5133 scope.go:117] "RemoveContainer" containerID="dbe99f55791714ede4342b12dd673f9519a9af24c5facb334e8087529e93569a" Nov 21 15:40:19 crc kubenswrapper[5133]: I1121 15:40:19.451085 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fft98/crc-debug-fmtpp" Nov 21 15:40:20 crc kubenswrapper[5133]: I1121 15:40:20.479196 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68c857b0-c83a-4afa-8a74-0b5ea4b3caf3" path="/var/lib/kubelet/pods/68c857b0-c83a-4afa-8a74-0b5ea4b3caf3/volumes" Nov 21 15:40:48 crc kubenswrapper[5133]: I1121 15:40:48.628980 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6f698694b-sk8kx_40cd1965-9315-4ed8-8902-4d9f77e63740/barbican-api/0.log" Nov 21 15:40:48 crc kubenswrapper[5133]: I1121 15:40:48.817817 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-68665bb7f8-mv95p_f583d06c-f59e-4f1e-883b-0ad7617e3c0f/barbican-keystone-listener/0.log" Nov 21 15:40:48 crc kubenswrapper[5133]: I1121 15:40:48.848435 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6f698694b-sk8kx_40cd1965-9315-4ed8-8902-4d9f77e63740/barbican-api-log/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.009810 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-68665bb7f8-mv95p_f583d06c-f59e-4f1e-883b-0ad7617e3c0f/barbican-keystone-listener-log/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.035421 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-654ff44bc9-5rc2k_1701df73-8d79-4305-a204-4ab7bf029dfd/barbican-worker-log/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.072032 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-654ff44bc9-5rc2k_1701df73-8d79-4305-a204-4ab7bf029dfd/barbican-worker/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.275826 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-8x5fz_bcf3ecf2-fb8c-424f-a9e8-600a16795bed/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.276864 5133 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_404313b1-20dd-4623-ad80-ab9e2b521526/ceilometer-central-agent/1.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.379058 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_404313b1-20dd-4623-ad80-ab9e2b521526/ceilometer-central-agent/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.471714 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_404313b1-20dd-4623-ad80-ab9e2b521526/proxy-httpd/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.475090 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_404313b1-20dd-4623-ad80-ab9e2b521526/sg-core/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.494943 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_404313b1-20dd-4623-ad80-ab9e2b521526/ceilometer-notification-agent/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.795302 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-xwfg8_1c45492b-4ba6-48e2-996e-807c20ed7852/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:49 crc kubenswrapper[5133]: I1121 15:40:49.795317 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-edpm-deployment-openstack-edpm-ipam-lg9mx_95893adb-b1b1-4216-987f-ca92a8a72629/ceph-client-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:50 crc kubenswrapper[5133]: I1121 15:40:50.382744 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_656b7d50-9049-415a-a4b1-08b531893110/probe/0.log" Nov 21 15:40:50 crc kubenswrapper[5133]: I1121 15:40:50.829295 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_46d511f0-7077-446e-b1f6-941fd109c41c/cinder-scheduler/0.log" Nov 21 15:40:51 crc kubenswrapper[5133]: I1121 15:40:51.009543 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_afc9fa06-90c1-49de-b5aa-9b10586a82b5/cinder-api/0.log" Nov 21 15:40:51 crc kubenswrapper[5133]: I1121 15:40:51.030237 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_afc9fa06-90c1-49de-b5aa-9b10586a82b5/cinder-api-log/0.log" Nov 21 15:40:51 crc kubenswrapper[5133]: I1121 15:40:51.131725 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_46d511f0-7077-446e-b1f6-941fd109c41c/probe/0.log" Nov 21 15:40:51 crc kubenswrapper[5133]: I1121 15:40:51.572204 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_3bf15e01-8975-4617-b353-163613da4bc5/probe/0.log" Nov 21 15:40:51 crc kubenswrapper[5133]: I1121 15:40:51.982776 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-f4k82_b94efc2b-b41e-4cbe-9f19-a89bdc99630c/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:51 crc kubenswrapper[5133]: I1121 15:40:51.993418 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_3bf15e01-8975-4617-b353-163613da4bc5/cinder-volume/1.log" Nov 21 15:40:52 crc kubenswrapper[5133]: I1121 15:40:52.231575 5133 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-5zdfl_037179d7-f069-49f7-8f2a-9fb94b34064d/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:52 crc kubenswrapper[5133]: I1121 15:40:52.438038 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-xltwv_06549e5c-262d-45b3-9790-7c264bcecf3c/init/0.log" Nov 21 15:40:52 crc kubenswrapper[5133]: I1121 15:40:52.637818 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-xltwv_06549e5c-262d-45b3-9790-7c264bcecf3c/init/0.log" Nov 21 15:40:52 crc kubenswrapper[5133]: I1121 15:40:52.800633 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-69655fd4bf-xltwv_06549e5c-262d-45b3-9790-7c264bcecf3c/dnsmasq-dns/0.log" Nov 21 15:40:52 crc kubenswrapper[5133]: I1121 15:40:52.880145 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_bc682d32-5242-41ad-8040-1b88a4ed2534/glance-httpd/0.log" Nov 21 15:40:52 crc kubenswrapper[5133]: I1121 15:40:52.903313 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_bc682d32-5242-41ad-8040-1b88a4ed2534/glance-log/0.log" Nov 21 15:40:53 crc kubenswrapper[5133]: I1121 15:40:53.069437 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_73ddb6a4-cb5d-4c55-925b-3a2047cc35d1/glance-log/0.log" Nov 21 15:40:53 crc kubenswrapper[5133]: I1121 15:40:53.117691 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_73ddb6a4-cb5d-4c55-925b-3a2047cc35d1/glance-httpd/0.log" Nov 21 15:40:53 crc kubenswrapper[5133]: I1121 15:40:53.392949 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6494f4b7cd-7bgbs_ce180175-f40a-48ea-bf15-1c8bfa8ff9aa/horizon/0.log" Nov 21 15:40:53 crc kubenswrapper[5133]: I1121 15:40:53.579751 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-8dhbb_d56ab399-fd3b-41d9-a915-ecf9bd15d5c8/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:53 crc kubenswrapper[5133]: I1121 15:40:53.580132 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_656b7d50-9049-415a-a4b1-08b531893110/cinder-backup/0.log" Nov 21 15:40:53 crc kubenswrapper[5133]: I1121 15:40:53.780638 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-s6c4j_021cf83b-1e98-445e-a69b-7a6509718a2f/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:53 crc kubenswrapper[5133]: I1121 15:40:53.885056 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29395621-24j44_7a15a1a4-bdab-48d4-923c-426ab5f12b1b/keystone-cron/0.log" Nov 21 15:40:53 crc kubenswrapper[5133]: I1121 15:40:53.944504 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-6494f4b7cd-7bgbs_ce180175-f40a-48ea-bf15-1c8bfa8ff9aa/horizon-log/0.log" Nov 21 15:40:54 crc kubenswrapper[5133]: I1121 15:40:54.176253 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_6898e19f-ae5d-4d82-ac31-d5100fb81625/kube-state-metrics/0.log" Nov 21 15:40:54 crc kubenswrapper[5133]: I1121 15:40:54.351737 5133 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-5kst7_7e411878-ed75-43cd-9684-6f5bc9f58f1f/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:54 crc kubenswrapper[5133]: I1121 15:40:54.576232 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_087e2ee4-bb11-426d-b624-2240837ee732/manila-api-log/0.log" Nov 21 15:40:54 crc kubenswrapper[5133]: I1121 15:40:54.628100 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_087e2ee4-bb11-426d-b624-2240837ee732/manila-api/0.log" Nov 21 15:40:54 crc kubenswrapper[5133]: I1121 15:40:54.832398 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_1306baba-7e9f-4f12-a43d-2c39cd5eb212/probe/0.log" Nov 21 15:40:54 crc kubenswrapper[5133]: I1121 15:40:54.877676 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_1306baba-7e9f-4f12-a43d-2c39cd5eb212/manila-scheduler/0.log" Nov 21 15:40:55 crc kubenswrapper[5133]: I1121 15:40:55.008656 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-f84bff9cd-skfwq_b0c7d424-6218-4da5-ae54-203c06f3d1be/keystone-api/0.log" Nov 21 15:40:55 crc kubenswrapper[5133]: I1121 15:40:55.058948 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_319ced81-67f1-4bb8-b896-c6f74ce4cfc0/manila-share/0.log" Nov 21 15:40:55 crc kubenswrapper[5133]: I1121 15:40:55.081851 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_319ced81-67f1-4bb8-b896-c6f74ce4cfc0/probe/0.log" Nov 21 15:40:55 crc kubenswrapper[5133]: I1121 15:40:55.841793 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6f8dcf7d6f-7dxkg_ac4cef87-965c-4905-b980-0dd6591d9317/neutron-httpd/0.log" Nov 21 15:40:55 crc kubenswrapper[5133]: I1121 15:40:55.941074 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6f8dcf7d6f-7dxkg_ac4cef87-965c-4905-b980-0dd6591d9317/neutron-api/0.log" Nov 21 15:40:56 crc kubenswrapper[5133]: I1121 15:40:56.005777 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-fjdcm_8d100df8-21d2-4dde-a9cf-25e3ad027bb7/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:56 crc kubenswrapper[5133]: I1121 15:40:56.755658 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_3b8a3cc3-d221-41ed-97ec-1fb8c4bf62c3/nova-cell0-conductor-conductor/0.log" Nov 21 15:40:57 crc kubenswrapper[5133]: I1121 15:40:57.064700 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_22807ad8-e61a-4708-8dda-5863f054c746/nova-api-log/0.log" Nov 21 15:40:57 crc kubenswrapper[5133]: I1121 15:40:57.284242 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_3c061b8f-ed3c-4996-80b5-6e8cfcc18968/nova-cell1-conductor-conductor/0.log" Nov 21 15:40:57 crc kubenswrapper[5133]: I1121 15:40:57.596378 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_f1c57f0d-b96c-415b-9f2f-30a8ba4188a1/nova-cell1-novncproxy-novncproxy/0.log" Nov 21 15:40:57 crc kubenswrapper[5133]: I1121 15:40:57.769800 5133 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-custom-ceph-edpm-deployment-openstack-edpm-ipam-sp4zb_25ee67bf-c176-4cdc-b2e8-1b36b72ff88e/nova-custom-ceph-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:40:57 crc kubenswrapper[5133]: I1121 15:40:57.861235 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_22807ad8-e61a-4708-8dda-5863f054c746/nova-api-api/0.log" Nov 21 15:40:58 crc kubenswrapper[5133]: I1121 15:40:58.086957 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_cda96e6d-092b-4f4a-8b74-e2c9771825f6/nova-metadata-log/0.log" Nov 21 15:40:58 crc kubenswrapper[5133]: I1121 15:40:58.463836 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_591d47e3-df0d-4bc0-bc6a-c51c1f40c8a8/nova-scheduler-scheduler/0.log" Nov 21 15:40:58 crc kubenswrapper[5133]: I1121 15:40:58.533039 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_65613fe9-4492-410c-9be2-2d01dcb1e085/mysql-bootstrap/0.log" Nov 21 15:40:58 crc kubenswrapper[5133]: I1121 15:40:58.703261 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_65613fe9-4492-410c-9be2-2d01dcb1e085/mysql-bootstrap/0.log" Nov 21 15:40:58 crc kubenswrapper[5133]: I1121 15:40:58.721826 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_65613fe9-4492-410c-9be2-2d01dcb1e085/galera/0.log" Nov 21 15:40:58 crc kubenswrapper[5133]: I1121 15:40:58.903043 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_3afd96f5-effd-43e8-8986-b9fc1fd28233/mysql-bootstrap/0.log" Nov 21 15:40:59 crc kubenswrapper[5133]: I1121 15:40:59.116220 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_3afd96f5-effd-43e8-8986-b9fc1fd28233/mysql-bootstrap/0.log" Nov 21 15:40:59 crc kubenswrapper[5133]: I1121 15:40:59.169788 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_3afd96f5-effd-43e8-8986-b9fc1fd28233/galera/0.log" Nov 21 15:40:59 crc kubenswrapper[5133]: I1121 15:40:59.667250 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_d2c71aef-bcfe-42ba-8fcc-4fd36400f190/openstackclient/0.log" Nov 21 15:40:59 crc kubenswrapper[5133]: I1121 15:40:59.844102 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-2ckfn_6afb5f16-3806-4fbf-becf-8bf66576286f/ovn-controller/0.log" Nov 21 15:41:00 crc kubenswrapper[5133]: I1121 15:41:00.041561 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-xnvhj_77732c59-506b-4d76-9f92-efe5357ed5e9/openstack-network-exporter/0.log" Nov 21 15:41:00 crc kubenswrapper[5133]: I1121 15:41:00.181257 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_3bf15e01-8975-4617-b353-163613da4bc5/cinder-volume/0.log" Nov 21 15:41:00 crc kubenswrapper[5133]: I1121 15:41:00.227385 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hbfsj_a1df22cc-e9c1-48b2-a807-ea8e93f8d366/ovsdb-server-init/0.log" Nov 21 15:41:00 crc kubenswrapper[5133]: I1121 15:41:00.389790 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hbfsj_a1df22cc-e9c1-48b2-a807-ea8e93f8d366/ovsdb-server-init/0.log" Nov 21 15:41:00 crc kubenswrapper[5133]: I1121 15:41:00.442193 5133 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hbfsj_a1df22cc-e9c1-48b2-a807-ea8e93f8d366/ovs-vswitchd/0.log" Nov 21 15:41:00 crc kubenswrapper[5133]: I1121 15:41:00.471515 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hbfsj_a1df22cc-e9c1-48b2-a807-ea8e93f8d366/ovsdb-server/0.log" Nov 21 15:41:00 crc kubenswrapper[5133]: I1121 15:41:00.659613 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-bhf62_0fec699f-ccdd-498f-8796-2c798f70d259/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:41:00 crc kubenswrapper[5133]: I1121 15:41:00.835321 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ac4c9909-c38c-4076-a8a5-ee3aa66a9630/openstack-network-exporter/0.log" Nov 21 15:41:00 crc kubenswrapper[5133]: I1121 15:41:00.914088 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_ac4c9909-c38c-4076-a8a5-ee3aa66a9630/ovn-northd/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.050696 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_2e94f41c-a61c-4502-b9c6-11bcaf9054dd/openstack-network-exporter/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.136670 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_2e94f41c-a61c-4502-b9c6-11bcaf9054dd/ovsdbserver-nb/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.232675 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_799b923b-f086-45a3-b88d-01c78ab3b1f0/openstack-network-exporter/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.311492 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_799b923b-f086-45a3-b88d-01c78ab3b1f0/ovsdbserver-sb/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.401167 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_cda96e6d-092b-4f4a-8b74-e2c9771825f6/nova-metadata-metadata/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.541096 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-55d9467df6-xwjg4_a76b0db6-05bc-45f8-a7cb-38bd7280d541/placement-api/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.634607 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_45d066aa-3bb1-4fdc-8c88-c384dd156e93/setup-container/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.661820 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-55d9467df6-xwjg4_a76b0db6-05bc-45f8-a7cb-38bd7280d541/placement-log/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.827547 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_45d066aa-3bb1-4fdc-8c88-c384dd156e93/setup-container/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.868269 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_45d066aa-3bb1-4fdc-8c88-c384dd156e93/rabbitmq/0.log" Nov 21 15:41:01 crc kubenswrapper[5133]: I1121 15:41:01.917755 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_83679b73-67d4-4733-b362-44060f589afd/setup-container/0.log" Nov 21 15:41:02 crc kubenswrapper[5133]: I1121 15:41:02.137176 5133 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_rabbitmq-server-0_83679b73-67d4-4733-b362-44060f589afd/setup-container/0.log" Nov 21 15:41:02 crc kubenswrapper[5133]: I1121 15:41:02.160066 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_83679b73-67d4-4733-b362-44060f589afd/rabbitmq/0.log" Nov 21 15:41:02 crc kubenswrapper[5133]: I1121 15:41:02.213453 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-5ldjx_8c3c5c91-dbfc-49f4-ac40-95e59e1d1aa8/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:41:02 crc kubenswrapper[5133]: I1121 15:41:02.390234 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-rvnn5_d8943a9b-ac1b-4f72-86d3-01138d4223e1/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:41:02 crc kubenswrapper[5133]: I1121 15:41:02.418782 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-gm5r2_060dc158-8f7a-4b81-9f16-ca7258cddeb5/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:41:02 crc kubenswrapper[5133]: I1121 15:41:02.586694 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-6kqrk_1b73a569-cd93-4222-bd53-77d391457324/ssh-known-hosts-edpm-deployment/0.log" Nov 21 15:41:02 crc kubenswrapper[5133]: I1121 15:41:02.744627 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_9c76c434-18e5-410a-9b4d-1538e6434c05/tempest-tests-tempest-tests-runner/0.log" Nov 21 15:41:02 crc kubenswrapper[5133]: I1121 15:41:02.865128 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_990c8e46-eae8-45b4-b847-dbaef496403e/test-operator-logs-container/0.log" Nov 21 15:41:02 crc kubenswrapper[5133]: I1121 15:41:02.971891 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-2ngdc_45a6b2e7-10f7-44d7-8293-bc3c32758294/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.607071 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wgbrs"] Nov 21 15:41:14 crc kubenswrapper[5133]: E1121 15:41:14.608201 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c857b0-c83a-4afa-8a74-0b5ea4b3caf3" containerName="container-00" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.608218 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c857b0-c83a-4afa-8a74-0b5ea4b3caf3" containerName="container-00" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.608433 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c857b0-c83a-4afa-8a74-0b5ea4b3caf3" containerName="container-00" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.610110 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.621724 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wgbrs"] Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.713383 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-utilities\") pod \"redhat-operators-wgbrs\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.713482 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4slpw\" (UniqueName: \"kubernetes.io/projected/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-kube-api-access-4slpw\") pod \"redhat-operators-wgbrs\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.713529 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-catalog-content\") pod \"redhat-operators-wgbrs\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.815013 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4slpw\" (UniqueName: \"kubernetes.io/projected/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-kube-api-access-4slpw\") pod \"redhat-operators-wgbrs\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.815384 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-catalog-content\") pod \"redhat-operators-wgbrs\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.815764 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-utilities\") pod \"redhat-operators-wgbrs\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.815840 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-catalog-content\") pod \"redhat-operators-wgbrs\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.816078 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-utilities\") pod \"redhat-operators-wgbrs\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.834601 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4slpw\" (UniqueName: \"kubernetes.io/projected/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-kube-api-access-4slpw\") pod \"redhat-operators-wgbrs\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:14 crc kubenswrapper[5133]: I1121 15:41:14.934533 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:15 crc kubenswrapper[5133]: I1121 15:41:15.338710 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_507698df-bffb-4ed6-bbad-7e62bb1875f7/memcached/0.log" Nov 21 15:41:15 crc kubenswrapper[5133]: I1121 15:41:15.407824 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wgbrs"] Nov 21 15:41:16 crc kubenswrapper[5133]: I1121 15:41:16.009927 5133 generic.go:334] "Generic (PLEG): container finished" podID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerID="78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d" exitCode=0 Nov 21 15:41:16 crc kubenswrapper[5133]: I1121 15:41:16.010021 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wgbrs" event={"ID":"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f","Type":"ContainerDied","Data":"78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d"} Nov 21 15:41:16 crc kubenswrapper[5133]: I1121 15:41:16.010363 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wgbrs" event={"ID":"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f","Type":"ContainerStarted","Data":"9a0c50924c56ea6524591f3bfc76118cd983640d7029ed535c4287850d141751"} Nov 21 15:41:16 crc kubenswrapper[5133]: I1121 15:41:16.011641 5133 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 21 15:41:17 crc kubenswrapper[5133]: I1121 15:41:17.022551 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wgbrs" event={"ID":"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f","Type":"ContainerStarted","Data":"0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145"} Nov 21 15:41:18 crc kubenswrapper[5133]: I1121 15:41:18.032777 5133 generic.go:334] "Generic (PLEG): container finished" podID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerID="0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145" exitCode=0 Nov 21 15:41:18 crc kubenswrapper[5133]: I1121 15:41:18.032825 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wgbrs" event={"ID":"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f","Type":"ContainerDied","Data":"0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145"} Nov 21 15:41:23 crc kubenswrapper[5133]: I1121 15:41:23.085819 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wgbrs" event={"ID":"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f","Type":"ContainerStarted","Data":"1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5"} Nov 21 15:41:23 crc kubenswrapper[5133]: I1121 15:41:23.114358 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wgbrs" podStartSLOduration=2.482640536 podStartE2EDuration="9.114330153s" podCreationTimestamp="2025-11-21 15:41:14 +0000 UTC" firstStartedPulling="2025-11-21 15:41:16.011450026 +0000 UTC m=+7135.809282274" lastFinishedPulling="2025-11-21 15:41:22.643139643 +0000 UTC 
m=+7142.440971891" observedRunningTime="2025-11-21 15:41:23.111343714 +0000 UTC m=+7142.909175972" watchObservedRunningTime="2025-11-21 15:41:23.114330153 +0000 UTC m=+7142.912162431" Nov 21 15:41:24 crc kubenswrapper[5133]: I1121 15:41:24.936303 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:24 crc kubenswrapper[5133]: I1121 15:41:24.937207 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:26 crc kubenswrapper[5133]: I1121 15:41:26.017096 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wgbrs" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="registry-server" probeResult="failure" output=< Nov 21 15:41:26 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 15:41:26 crc kubenswrapper[5133]: > Nov 21 15:41:26 crc kubenswrapper[5133]: I1121 15:41:26.680771 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t_80d4af44-47ac-4fcf-a581-b22abd5ca264/util/0.log" Nov 21 15:41:26 crc kubenswrapper[5133]: I1121 15:41:26.784533 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t_80d4af44-47ac-4fcf-a581-b22abd5ca264/util/0.log" Nov 21 15:41:26 crc kubenswrapper[5133]: I1121 15:41:26.872486 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t_80d4af44-47ac-4fcf-a581-b22abd5ca264/pull/0.log" Nov 21 15:41:26 crc kubenswrapper[5133]: I1121 15:41:26.882516 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t_80d4af44-47ac-4fcf-a581-b22abd5ca264/pull/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.051539 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t_80d4af44-47ac-4fcf-a581-b22abd5ca264/util/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.062615 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t_80d4af44-47ac-4fcf-a581-b22abd5ca264/pull/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.066993 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_8918bc7d41988dccd7f823c453a3239d0c23cc745e2adb37204c019cb4hc86t_80d4af44-47ac-4fcf-a581-b22abd5ca264/extract/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.263756 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-znxpd_99e612e9-ba8d-41cd-9654-4332a4132c4f/manager/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.265168 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-86dc4d89c8-znxpd_99e612e9-ba8d-41cd-9654-4332a4132c4f/kube-rbac-proxy/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.334965 5133 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-5c575964b7-fcp4p_50fccca4-2734-4839-83b0-d220b0dfa1d6/kube-rbac-proxy/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.482492 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-5c575964b7-fcp4p_50fccca4-2734-4839-83b0-d220b0dfa1d6/manager/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.490360 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-l7bft_fbcf38a1-6730-49b5-bf19-9966465f2d1b/kube-rbac-proxy/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.698378 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-7d695c9b56-l7bft_fbcf38a1-6730-49b5-bf19-9966465f2d1b/manager/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.872271 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-g6kng_abdbcac3-a654-4558-b71c-6b0a4d6c9c19/kube-rbac-proxy/0.log" Nov 21 15:41:27 crc kubenswrapper[5133]: I1121 15:41:27.881902 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-68b95954c9-g6kng_abdbcac3-a654-4558-b71c-6b0a4d6c9c19/manager/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.043680 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-q9q7l_156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8/kube-rbac-proxy/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.045528 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-774b86978c-q9q7l_156c8963-8a3a-4cb2-93ca-a14ab1e2c4a8/manager/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.154491 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-4h6lz_701735b8-41e5-47f9-9ac7-b8cfc0357597/kube-rbac-proxy/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.220698 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c9694994-4h6lz_701735b8-41e5-47f9-9ac7-b8cfc0357597/manager/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.303632 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-7g4lm_0b72b567-d244-4d7e-984b-ba42dfb7be25/kube-rbac-proxy/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.452810 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-glgx8_d1eae28c-1654-4cfc-a380-c56e52bdd2d5/kube-rbac-proxy/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.461532 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-d5cc86f4b-7g4lm_0b72b567-d244-4d7e-984b-ba42dfb7be25/manager/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.546624 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5bfcdc958c-glgx8_d1eae28c-1654-4cfc-a380-c56e52bdd2d5/manager/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.627300 5133 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-gqfpz_28c76d9f-b15c-4b67-b616-88f89fea7eb7/kube-rbac-proxy/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.670031 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-748dc6576f-gqfpz_28c76d9f-b15c-4b67-b616-88f89fea7eb7/manager/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.787830 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-mj6sx_641bce51-c8fb-4956-a13d-e42d7204b3d2/kube-rbac-proxy/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.918564 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58bb8d67cc-mj6sx_641bce51-c8fb-4956-a13d-e42d7204b3d2/manager/0.log" Nov 21 15:41:28 crc kubenswrapper[5133]: I1121 15:41:28.985166 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-mwsrn_a7a7378a-ba96-4a74-9730-61c7e0215843/kube-rbac-proxy/0.log" Nov 21 15:41:29 crc kubenswrapper[5133]: I1121 15:41:29.016031 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-cb6c4fdb7-mwsrn_a7a7378a-ba96-4a74-9730-61c7e0215843/manager/0.log" Nov 21 15:41:29 crc kubenswrapper[5133]: I1121 15:41:29.157258 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-vt2kw_2da1ddc9-a310-4a6a-90de-b63c8b33448a/kube-rbac-proxy/0.log" Nov 21 15:41:29 crc kubenswrapper[5133]: I1121 15:41:29.249208 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-7c57c8bbc4-vt2kw_2da1ddc9-a310-4a6a-90de-b63c8b33448a/manager/0.log" Nov 21 15:41:29 crc kubenswrapper[5133]: I1121 15:41:29.295295 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-vpjk7_2b5268ec-12ae-4aee-84f2-f176c3e8f1c3/kube-rbac-proxy/0.log" Nov 21 15:41:29 crc kubenswrapper[5133]: I1121 15:41:29.452698 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-vpjk7_2b5268ec-12ae-4aee-84f2-f176c3e8f1c3/manager/0.log" Nov 21 15:41:29 crc kubenswrapper[5133]: I1121 15:41:29.474586 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-mxphc_a076ae54-b994-453a-9361-2bf9acab8d2d/kube-rbac-proxy/0.log" Nov 21 15:41:29 crc kubenswrapper[5133]: I1121 15:41:29.524869 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-fd75fd47d-mxphc_a076ae54-b994-453a-9361-2bf9acab8d2d/manager/0.log" Nov 21 15:41:29 crc kubenswrapper[5133]: I1121 15:41:29.654380 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-5z85m_2036671e-8670-48c3-af60-5eee8087efa7/kube-rbac-proxy/0.log" Nov 21 15:41:29 crc kubenswrapper[5133]: I1121 15:41:29.668634 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-544b9bb9-5z85m_2036671e-8670-48c3-af60-5eee8087efa7/manager/0.log" Nov 21 15:41:30 crc kubenswrapper[5133]: I1121 
15:41:30.016768 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-6d9c469469-jzjfk_93d86db4-2649-43aa-b252-6a288a505bec/operator/0.log" Nov 21 15:41:30 crc kubenswrapper[5133]: I1121 15:41:30.055673 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-r5rkm_a92d6765-b45f-43f2-a1ae-c55689f599cb/registry-server/0.log" Nov 21 15:41:30 crc kubenswrapper[5133]: I1121 15:41:30.194655 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-tzn9t_71d8bd8e-380d-458b-93b8-7e1a68964294/kube-rbac-proxy/0.log" Nov 21 15:41:30 crc kubenswrapper[5133]: I1121 15:41:30.349771 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-66cf5c67ff-tzn9t_71d8bd8e-380d-458b-93b8-7e1a68964294/manager/0.log" Nov 21 15:41:30 crc kubenswrapper[5133]: I1121 15:41:30.478603 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-kkk8d_3561dc03-64fa-4b51-b14c-02ef7dc87280/kube-rbac-proxy/0.log" Nov 21 15:41:30 crc kubenswrapper[5133]: I1121 15:41:30.573367 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5db546f9d9-kkk8d_3561dc03-64fa-4b51-b14c-02ef7dc87280/manager/0.log" Nov 21 15:41:30 crc kubenswrapper[5133]: I1121 15:41:30.731209 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-lfntj_167b5c97-691f-4078-ac5f-e849a162b136/operator/0.log" Nov 21 15:41:30 crc kubenswrapper[5133]: I1121 15:41:30.928749 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-vpq2d_167387d2-835f-4d70-8a59-de71037b8178/kube-rbac-proxy/0.log" Nov 21 15:41:30 crc kubenswrapper[5133]: I1121 15:41:30.930084 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-6fdc4fcf86-vpq2d_167387d2-835f-4d70-8a59-de71037b8178/manager/0.log" Nov 21 15:41:31 crc kubenswrapper[5133]: I1121 15:41:31.049144 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-772lb_6f45172d-e807-4bb7-b836-a2a6a53beccf/kube-rbac-proxy/0.log" Nov 21 15:41:31 crc kubenswrapper[5133]: I1121 15:41:31.226840 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-448nh_d8e037c0-d4af-4d74-9e8f-e749db78b81d/kube-rbac-proxy/0.log" Nov 21 15:41:31 crc kubenswrapper[5133]: I1121 15:41:31.261638 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-567f98c9d-772lb_6f45172d-e807-4bb7-b836-a2a6a53beccf/manager/0.log" Nov 21 15:41:31 crc kubenswrapper[5133]: I1121 15:41:31.305866 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-679bdf8cc4-fsfb6_1993c427-2d4f-4f20-9390-e7d67d4a6fe2/manager/0.log" Nov 21 15:41:31 crc kubenswrapper[5133]: I1121 15:41:31.322671 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cb74df96-448nh_d8e037c0-d4af-4d74-9e8f-e749db78b81d/manager/0.log" Nov 21 15:41:31 crc kubenswrapper[5133]: I1121 
15:41:31.422959 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-g2fdl_f2e66c49-bdd0-408a-867b-6e4c869df2a7/kube-rbac-proxy/0.log" Nov 21 15:41:31 crc kubenswrapper[5133]: I1121 15:41:31.506940 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-864885998-g2fdl_f2e66c49-bdd0-408a-867b-6e4c869df2a7/manager/0.log" Nov 21 15:41:35 crc kubenswrapper[5133]: I1121 15:41:35.977164 5133 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wgbrs" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="registry-server" probeResult="failure" output=< Nov 21 15:41:35 crc kubenswrapper[5133]: timeout: failed to connect service ":50051" within 1s Nov 21 15:41:35 crc kubenswrapper[5133]: > Nov 21 15:41:45 crc kubenswrapper[5133]: I1121 15:41:45.015313 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:45 crc kubenswrapper[5133]: I1121 15:41:45.073240 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:45 crc kubenswrapper[5133]: I1121 15:41:45.781317 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wgbrs"] Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.282427 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wgbrs" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="registry-server" containerID="cri-o://1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5" gracePeriod=2 Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.630258 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-sspkx_5686b316-695d-4eab-a619-1033b90d6d96/control-plane-machine-set-operator/0.log" Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.742221 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.836488 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-w26jv_576691ca-af69-46de-b8c2-5b2195e3db0b/kube-rbac-proxy/0.log" Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.865910 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-utilities\") pod \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.866011 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4slpw\" (UniqueName: \"kubernetes.io/projected/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-kube-api-access-4slpw\") pod \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.866175 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-catalog-content\") pod \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\" (UID: \"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f\") " Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.866578 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-utilities" (OuterVolumeSpecName: "utilities") pod "dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" (UID: "dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.873056 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-kube-api-access-4slpw" (OuterVolumeSpecName: "kube-api-access-4slpw") pod "dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" (UID: "dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f"). InnerVolumeSpecName "kube-api-access-4slpw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.886341 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-w26jv_576691ca-af69-46de-b8c2-5b2195e3db0b/machine-api-operator/0.log" Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.952512 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" (UID: "dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.968478 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.968727 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4slpw\" (UniqueName: \"kubernetes.io/projected/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-kube-api-access-4slpw\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:46 crc kubenswrapper[5133]: I1121 15:41:46.968792 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.291629 5133 generic.go:334] "Generic (PLEG): container finished" podID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerID="1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5" exitCode=0 Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.291671 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wgbrs" event={"ID":"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f","Type":"ContainerDied","Data":"1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5"} Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.291696 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wgbrs" event={"ID":"dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f","Type":"ContainerDied","Data":"9a0c50924c56ea6524591f3bfc76118cd983640d7029ed535c4287850d141751"} Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.291711 5133 scope.go:117] "RemoveContainer" containerID="1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.291844 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wgbrs" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.315683 5133 scope.go:117] "RemoveContainer" containerID="0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.333270 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wgbrs"] Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.341317 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wgbrs"] Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.350839 5133 scope.go:117] "RemoveContainer" containerID="78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.384641 5133 scope.go:117] "RemoveContainer" containerID="1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5" Nov 21 15:41:47 crc kubenswrapper[5133]: E1121 15:41:47.385082 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5\": container with ID starting with 1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5 not found: ID does not exist" containerID="1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.385123 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5"} err="failed to get container status \"1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5\": rpc error: code = NotFound desc = could not find container \"1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5\": container with ID starting with 1e60ad07f78a91e527fd0febba2ba4ed4d42ddc2133b6e3ac339157be8af52f5 not found: ID does not exist" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.385146 5133 scope.go:117] "RemoveContainer" containerID="0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145" Nov 21 15:41:47 crc kubenswrapper[5133]: E1121 15:41:47.385550 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145\": container with ID starting with 0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145 not found: ID does not exist" containerID="0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.385582 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145"} err="failed to get container status \"0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145\": rpc error: code = NotFound desc = could not find container \"0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145\": container with ID starting with 0c57043c312b244c3e6136773fb0e7f8f8616c27f37e134273e7c229e6d76145 not found: ID does not exist" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.385603 5133 scope.go:117] "RemoveContainer" containerID="78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d" Nov 21 15:41:47 crc kubenswrapper[5133]: E1121 15:41:47.386138 5133 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d\": container with ID starting with 78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d not found: ID does not exist" containerID="78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d" Nov 21 15:41:47 crc kubenswrapper[5133]: I1121 15:41:47.386163 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d"} err="failed to get container status \"78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d\": rpc error: code = NotFound desc = could not find container \"78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d\": container with ID starting with 78150b9e4ef719025fc4a7069fd64e394738ba70e7a0ba50c7bb67dba265512d not found: ID does not exist" Nov 21 15:41:48 crc kubenswrapper[5133]: I1121 15:41:48.467476 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" path="/var/lib/kubelet/pods/dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f/volumes" Nov 21 15:41:53 crc kubenswrapper[5133]: I1121 15:41:53.311298 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:41:53 crc kubenswrapper[5133]: I1121 15:41:53.311858 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:41:57 crc kubenswrapper[5133]: I1121 15:41:57.968012 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-t5gk2_877c323d-4f37-4803-8376-43e5946404e2/cert-manager-controller/0.log" Nov 21 15:41:58 crc kubenswrapper[5133]: I1121 15:41:58.108491 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-mvs62_f6bf4994-1e4e-4b36-96f8-db4c09620dc6/cert-manager-cainjector/0.log" Nov 21 15:41:58 crc kubenswrapper[5133]: I1121 15:41:58.159225 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-k9w95_a3ddb6ce-5d67-47b1-8550-56613fa11579/cert-manager-webhook/0.log" Nov 21 15:42:09 crc kubenswrapper[5133]: I1121 15:42:09.648795 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-9vb4n_d22f52b5-ba71-44ff-8877-63ab436f5683/nmstate-console-plugin/0.log" Nov 21 15:42:09 crc kubenswrapper[5133]: I1121 15:42:09.804187 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-pb5nh_0b59ced3-80c3-4991-8005-600e0d36c2b3/nmstate-handler/0.log" Nov 21 15:42:09 crc kubenswrapper[5133]: I1121 15:42:09.868637 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-txndq_db154bb8-c484-469b-89f3-d84a11ff8eae/kube-rbac-proxy/0.log" Nov 21 15:42:09 crc kubenswrapper[5133]: I1121 15:42:09.915352 5133 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-txndq_db154bb8-c484-469b-89f3-d84a11ff8eae/nmstate-metrics/0.log" Nov 21 15:42:10 crc kubenswrapper[5133]: I1121 15:42:10.065548 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-cwv52_d242e3ae-cf32-4eea-8360-b0f6fab0d5af/nmstate-operator/0.log" Nov 21 15:42:10 crc kubenswrapper[5133]: I1121 15:42:10.081963 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-b474p_b8d952ba-d2a8-46a8-b2de-b47a5ad448b7/nmstate-webhook/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.179214 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-h9nrl_f18f6a68-8b84-4830-a504-70170e7e0125/kube-rbac-proxy/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.310330 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.310691 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.346715 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-h9nrl_f18f6a68-8b84-4830-a504-70170e7e0125/controller/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.412329 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-frr-files/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.562227 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-frr-files/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.568482 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-reloader/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.609831 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-metrics/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.615262 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-reloader/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.841430 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-metrics/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.843694 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-reloader/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 15:42:23.844582 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-metrics/0.log" Nov 21 15:42:23 crc kubenswrapper[5133]: I1121 
15:42:23.847719 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-frr-files/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.001733 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-frr-files/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.017455 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-reloader/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.041730 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/controller/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.042923 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/cp-metrics/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.170372 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/frr-metrics/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.212206 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/kube-rbac-proxy-frr/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.253250 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/kube-rbac-proxy/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.399802 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/reloader/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.463040 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-9drp9_67474cce-10bd-4da6-895f-a7e465d362a6/frr-k8s-webhook-server/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.705979 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5468dfbc5b-plbnr_fb96f30b-10d1-4d5a-a909-c718939fd900/manager/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.852184 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5d4d9648d8-8trqg_88d867e4-8f6e-407b-b65f-b87a47d2c578/webhook-server/0.log" Nov 21 15:42:24 crc kubenswrapper[5133]: I1121 15:42:24.895853 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-nstmv_3a9b4a92-1fba-4b32-94c3-be5343dae8d2/kube-rbac-proxy/0.log" Nov 21 15:42:25 crc kubenswrapper[5133]: I1121 15:42:25.468902 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-nstmv_3a9b4a92-1fba-4b32-94c3-be5343dae8d2/speaker/0.log" Nov 21 15:42:25 crc kubenswrapper[5133]: I1121 15:42:25.970591 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-9sscr_48504bed-9d84-42d4-8ec2-c98bcb981b11/frr/0.log" Nov 21 15:42:37 crc kubenswrapper[5133]: I1121 15:42:37.244541 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57_2664fda7-42fc-42a7-9c72-ccd5ce36a862/util/0.log" Nov 21 15:42:37 crc kubenswrapper[5133]: I1121 
15:42:37.457748 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57_2664fda7-42fc-42a7-9c72-ccd5ce36a862/pull/0.log" Nov 21 15:42:37 crc kubenswrapper[5133]: I1121 15:42:37.472384 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57_2664fda7-42fc-42a7-9c72-ccd5ce36a862/pull/0.log" Nov 21 15:42:37 crc kubenswrapper[5133]: I1121 15:42:37.475196 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57_2664fda7-42fc-42a7-9c72-ccd5ce36a862/util/0.log" Nov 21 15:42:37 crc kubenswrapper[5133]: I1121 15:42:37.657567 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57_2664fda7-42fc-42a7-9c72-ccd5ce36a862/pull/0.log" Nov 21 15:42:37 crc kubenswrapper[5133]: I1121 15:42:37.666799 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57_2664fda7-42fc-42a7-9c72-ccd5ce36a862/util/0.log" Nov 21 15:42:37 crc kubenswrapper[5133]: I1121 15:42:37.708053 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772eprr57_2664fda7-42fc-42a7-9c72-ccd5ce36a862/extract/0.log" Nov 21 15:42:37 crc kubenswrapper[5133]: I1121 15:42:37.831627 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jfgzq_7e13b754-a463-4945-95dd-5bbac447c494/extract-utilities/0.log" Nov 21 15:42:38 crc kubenswrapper[5133]: I1121 15:42:38.000581 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jfgzq_7e13b754-a463-4945-95dd-5bbac447c494/extract-content/0.log" Nov 21 15:42:38 crc kubenswrapper[5133]: I1121 15:42:38.000883 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jfgzq_7e13b754-a463-4945-95dd-5bbac447c494/extract-content/0.log" Nov 21 15:42:38 crc kubenswrapper[5133]: I1121 15:42:38.052643 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jfgzq_7e13b754-a463-4945-95dd-5bbac447c494/extract-utilities/0.log" Nov 21 15:42:38 crc kubenswrapper[5133]: I1121 15:42:38.267229 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jfgzq_7e13b754-a463-4945-95dd-5bbac447c494/extract-content/0.log" Nov 21 15:42:38 crc kubenswrapper[5133]: I1121 15:42:38.330132 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jfgzq_7e13b754-a463-4945-95dd-5bbac447c494/extract-utilities/0.log" Nov 21 15:42:38 crc kubenswrapper[5133]: I1121 15:42:38.518546 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j5d8m_243f1648-cb3f-4d5e-ac89-25469a1e9896/extract-utilities/0.log" Nov 21 15:42:38 crc kubenswrapper[5133]: I1121 15:42:38.716690 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j5d8m_243f1648-cb3f-4d5e-ac89-25469a1e9896/extract-content/0.log" Nov 21 15:42:38 crc kubenswrapper[5133]: I1121 15:42:38.767154 5133 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-j5d8m_243f1648-cb3f-4d5e-ac89-25469a1e9896/extract-utilities/0.log" Nov 21 15:42:38 crc kubenswrapper[5133]: I1121 15:42:38.798207 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j5d8m_243f1648-cb3f-4d5e-ac89-25469a1e9896/extract-content/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.018624 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j5d8m_243f1648-cb3f-4d5e-ac89-25469a1e9896/extract-utilities/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.022456 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j5d8m_243f1648-cb3f-4d5e-ac89-25469a1e9896/extract-content/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.086795 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-jfgzq_7e13b754-a463-4945-95dd-5bbac447c494/registry-server/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.295803 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst_ba9d8865-d355-4438-a9eb-b50c55da374b/util/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.423855 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst_ba9d8865-d355-4438-a9eb-b50c55da374b/util/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.515696 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst_ba9d8865-d355-4438-a9eb-b50c55da374b/pull/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.593587 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst_ba9d8865-d355-4438-a9eb-b50c55da374b/pull/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.757361 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst_ba9d8865-d355-4438-a9eb-b50c55da374b/util/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.783868 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst_ba9d8865-d355-4438-a9eb-b50c55da374b/pull/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.795684 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-j5d8m_243f1648-cb3f-4d5e-ac89-25469a1e9896/registry-server/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.804956 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6fqwst_ba9d8865-d355-4438-a9eb-b50c55da374b/extract/0.log" Nov 21 15:42:39 crc kubenswrapper[5133]: I1121 15:42:39.937613 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qp85d_22623623-0726-4900-899d-c2d0e34a9562/marketplace-operator/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.006780 5133 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-jtgkd_3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4/extract-utilities/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.158858 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jtgkd_3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4/extract-utilities/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.181941 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jtgkd_3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4/extract-content/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.189229 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jtgkd_3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4/extract-content/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.357532 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jtgkd_3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4/extract-utilities/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.362675 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jtgkd_3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4/extract-content/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.553736 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ls26_7c7ae814-704e-4480-8297-6a5309c94c22/extract-utilities/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.612715 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jtgkd_3f8cf09c-4d75-475f-b3d9-9f0b892f0bc4/registry-server/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.716346 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ls26_7c7ae814-704e-4480-8297-6a5309c94c22/extract-utilities/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.743798 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ls26_7c7ae814-704e-4480-8297-6a5309c94c22/extract-content/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.746377 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ls26_7c7ae814-704e-4480-8297-6a5309c94c22/extract-content/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.935742 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ls26_7c7ae814-704e-4480-8297-6a5309c94c22/extract-content/0.log" Nov 21 15:42:40 crc kubenswrapper[5133]: I1121 15:42:40.947503 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ls26_7c7ae814-704e-4480-8297-6a5309c94c22/extract-utilities/0.log" Nov 21 15:42:41 crc kubenswrapper[5133]: I1121 15:42:41.684833 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-7ls26_7c7ae814-704e-4480-8297-6a5309c94c22/registry-server/0.log" Nov 21 15:42:53 crc kubenswrapper[5133]: I1121 15:42:53.311052 5133 patch_prober.go:28] interesting pod/machine-config-daemon-xxlvp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 21 15:42:53 crc 
kubenswrapper[5133]: I1121 15:42:53.311657 5133 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 21 15:42:53 crc kubenswrapper[5133]: I1121 15:42:53.311710 5133 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" Nov 21 15:42:53 crc kubenswrapper[5133]: I1121 15:42:53.312544 5133 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd"} pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 21 15:42:53 crc kubenswrapper[5133]: I1121 15:42:53.312599 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" containerName="machine-config-daemon" containerID="cri-o://b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" gracePeriod=600 Nov 21 15:42:53 crc kubenswrapper[5133]: E1121 15:42:53.433688 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:42:53 crc kubenswrapper[5133]: I1121 15:42:53.887070 5133 generic.go:334] "Generic (PLEG): container finished" podID="52f5a729-05d1-4f84-a216-1df3233af57d" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" exitCode=0 Nov 21 15:42:53 crc kubenswrapper[5133]: I1121 15:42:53.887141 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerDied","Data":"b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd"} Nov 21 15:42:53 crc kubenswrapper[5133]: I1121 15:42:53.887735 5133 scope.go:117] "RemoveContainer" containerID="687490ee2e06821d0dabcc541391ccb5223aacb17e1dc37e0067e4b8571cb9fa" Nov 21 15:42:53 crc kubenswrapper[5133]: I1121 15:42:53.888396 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:42:53 crc kubenswrapper[5133]: E1121 15:42:53.888733 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:43:04 crc kubenswrapper[5133]: I1121 15:43:04.458015 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:43:04 crc 
kubenswrapper[5133]: E1121 15:43:04.458866 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.065036 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jddvk"] Nov 21 15:43:05 crc kubenswrapper[5133]: E1121 15:43:05.065714 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="extract-content" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.065731 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="extract-content" Nov 21 15:43:05 crc kubenswrapper[5133]: E1121 15:43:05.065747 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="extract-utilities" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.065757 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="extract-utilities" Nov 21 15:43:05 crc kubenswrapper[5133]: E1121 15:43:05.065781 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="registry-server" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.065788 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="registry-server" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.065974 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd705a71-4d9f-40f8-abb4-a4c9ab3bab6f" containerName="registry-server" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.067330 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.080990 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jddvk"] Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.095131 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzrzp\" (UniqueName: \"kubernetes.io/projected/bcfe872e-7da5-448f-89b2-b6cecd870522-kube-api-access-rzrzp\") pod \"certified-operators-jddvk\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.095233 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-utilities\") pod \"certified-operators-jddvk\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.095270 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-catalog-content\") pod \"certified-operators-jddvk\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.198399 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzrzp\" (UniqueName: \"kubernetes.io/projected/bcfe872e-7da5-448f-89b2-b6cecd870522-kube-api-access-rzrzp\") pod \"certified-operators-jddvk\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.198627 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-utilities\") pod \"certified-operators-jddvk\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.198687 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-catalog-content\") pod \"certified-operators-jddvk\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.199244 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-utilities\") pod \"certified-operators-jddvk\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.199273 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-catalog-content\") pod \"certified-operators-jddvk\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.221506 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rzrzp\" (UniqueName: \"kubernetes.io/projected/bcfe872e-7da5-448f-89b2-b6cecd870522-kube-api-access-rzrzp\") pod \"certified-operators-jddvk\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.395411 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:05 crc kubenswrapper[5133]: I1121 15:43:05.982359 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jddvk"] Nov 21 15:43:06 crc kubenswrapper[5133]: I1121 15:43:06.006266 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jddvk" event={"ID":"bcfe872e-7da5-448f-89b2-b6cecd870522","Type":"ContainerStarted","Data":"a73afa5962caf714c6f77eb999136fbf24771b44c62609caa3628f4764da9b2a"} Nov 21 15:43:07 crc kubenswrapper[5133]: I1121 15:43:07.015573 5133 generic.go:334] "Generic (PLEG): container finished" podID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerID="64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa" exitCode=0 Nov 21 15:43:07 crc kubenswrapper[5133]: I1121 15:43:07.015671 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jddvk" event={"ID":"bcfe872e-7da5-448f-89b2-b6cecd870522","Type":"ContainerDied","Data":"64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa"} Nov 21 15:43:08 crc kubenswrapper[5133]: I1121 15:43:08.047802 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jddvk" event={"ID":"bcfe872e-7da5-448f-89b2-b6cecd870522","Type":"ContainerStarted","Data":"bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373"} Nov 21 15:43:10 crc kubenswrapper[5133]: I1121 15:43:10.068403 5133 generic.go:334] "Generic (PLEG): container finished" podID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerID="bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373" exitCode=0 Nov 21 15:43:10 crc kubenswrapper[5133]: I1121 15:43:10.068470 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jddvk" event={"ID":"bcfe872e-7da5-448f-89b2-b6cecd870522","Type":"ContainerDied","Data":"bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373"} Nov 21 15:43:11 crc kubenswrapper[5133]: I1121 15:43:11.086619 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jddvk" event={"ID":"bcfe872e-7da5-448f-89b2-b6cecd870522","Type":"ContainerStarted","Data":"0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab"} Nov 21 15:43:11 crc kubenswrapper[5133]: I1121 15:43:11.112881 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jddvk" podStartSLOduration=2.693104757 podStartE2EDuration="6.112858451s" podCreationTimestamp="2025-11-21 15:43:05 +0000 UTC" firstStartedPulling="2025-11-21 15:43:07.038017439 +0000 UTC m=+7246.835849687" lastFinishedPulling="2025-11-21 15:43:10.457771133 +0000 UTC m=+7250.255603381" observedRunningTime="2025-11-21 15:43:11.107892979 +0000 UTC m=+7250.905725227" watchObservedRunningTime="2025-11-21 15:43:11.112858451 +0000 UTC m=+7250.910690699" Nov 21 15:43:15 crc kubenswrapper[5133]: I1121 15:43:15.395738 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:15 crc kubenswrapper[5133]: I1121 15:43:15.396346 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:15 crc kubenswrapper[5133]: I1121 15:43:15.457120 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:16 crc kubenswrapper[5133]: I1121 15:43:16.193881 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:16 crc kubenswrapper[5133]: I1121 15:43:16.258413 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jddvk"] Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.147489 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jddvk" podUID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerName="registry-server" containerID="cri-o://0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab" gracePeriod=2 Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.458260 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:43:18 crc kubenswrapper[5133]: E1121 15:43:18.458836 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.639033 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.757670 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzrzp\" (UniqueName: \"kubernetes.io/projected/bcfe872e-7da5-448f-89b2-b6cecd870522-kube-api-access-rzrzp\") pod \"bcfe872e-7da5-448f-89b2-b6cecd870522\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.757827 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-catalog-content\") pod \"bcfe872e-7da5-448f-89b2-b6cecd870522\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.758045 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-utilities\") pod \"bcfe872e-7da5-448f-89b2-b6cecd870522\" (UID: \"bcfe872e-7da5-448f-89b2-b6cecd870522\") " Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.759329 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-utilities" (OuterVolumeSpecName: "utilities") pod "bcfe872e-7da5-448f-89b2-b6cecd870522" (UID: "bcfe872e-7da5-448f-89b2-b6cecd870522"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.781398 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcfe872e-7da5-448f-89b2-b6cecd870522-kube-api-access-rzrzp" (OuterVolumeSpecName: "kube-api-access-rzrzp") pod "bcfe872e-7da5-448f-89b2-b6cecd870522" (UID: "bcfe872e-7da5-448f-89b2-b6cecd870522"). InnerVolumeSpecName "kube-api-access-rzrzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.820748 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bcfe872e-7da5-448f-89b2-b6cecd870522" (UID: "bcfe872e-7da5-448f-89b2-b6cecd870522"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.860175 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.860209 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzrzp\" (UniqueName: \"kubernetes.io/projected/bcfe872e-7da5-448f-89b2-b6cecd870522-kube-api-access-rzrzp\") on node \"crc\" DevicePath \"\"" Nov 21 15:43:18 crc kubenswrapper[5133]: I1121 15:43:18.860221 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcfe872e-7da5-448f-89b2-b6cecd870522-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.158220 5133 generic.go:334] "Generic (PLEG): container finished" podID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerID="0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab" exitCode=0 Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.158566 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jddvk" event={"ID":"bcfe872e-7da5-448f-89b2-b6cecd870522","Type":"ContainerDied","Data":"0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab"} Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.158593 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jddvk" event={"ID":"bcfe872e-7da5-448f-89b2-b6cecd870522","Type":"ContainerDied","Data":"a73afa5962caf714c6f77eb999136fbf24771b44c62609caa3628f4764da9b2a"} Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.158609 5133 scope.go:117] "RemoveContainer" containerID="0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.158737 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jddvk" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.207313 5133 scope.go:117] "RemoveContainer" containerID="bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.210083 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jddvk"] Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.222963 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jddvk"] Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.231348 5133 scope.go:117] "RemoveContainer" containerID="64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.295596 5133 scope.go:117] "RemoveContainer" containerID="0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab" Nov 21 15:43:19 crc kubenswrapper[5133]: E1121 15:43:19.296148 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab\": container with ID starting with 0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab not found: ID does not exist" containerID="0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.296195 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab"} err="failed to get container status \"0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab\": rpc error: code = NotFound desc = could not find container \"0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab\": container with ID starting with 0ee9596c059cc39f823f94598fdf00c6125947e0341325e180fa35b41b10c8ab not found: ID does not exist" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.296232 5133 scope.go:117] "RemoveContainer" containerID="bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373" Nov 21 15:43:19 crc kubenswrapper[5133]: E1121 15:43:19.297211 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373\": container with ID starting with bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373 not found: ID does not exist" containerID="bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.297285 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373"} err="failed to get container status \"bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373\": rpc error: code = NotFound desc = could not find container \"bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373\": container with ID starting with bae0c6fd9e2d25fef56ca7819fc6ac9f29af3b2b8498df60bea5003d37713373 not found: ID does not exist" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.297323 5133 scope.go:117] "RemoveContainer" containerID="64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa" Nov 21 15:43:19 crc kubenswrapper[5133]: E1121 15:43:19.297582 5133 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa\": container with ID starting with 64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa not found: ID does not exist" containerID="64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa" Nov 21 15:43:19 crc kubenswrapper[5133]: I1121 15:43:19.297613 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa"} err="failed to get container status \"64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa\": rpc error: code = NotFound desc = could not find container \"64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa\": container with ID starting with 64046492a9618754fa5ce4d29c483b42757f5068e89d575e43ba467869ea64fa not found: ID does not exist" Nov 21 15:43:20 crc kubenswrapper[5133]: I1121 15:43:20.468720 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcfe872e-7da5-448f-89b2-b6cecd870522" path="/var/lib/kubelet/pods/bcfe872e-7da5-448f-89b2-b6cecd870522/volumes" Nov 21 15:43:30 crc kubenswrapper[5133]: I1121 15:43:30.458390 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:43:30 crc kubenswrapper[5133]: E1121 15:43:30.459493 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:43:42 crc kubenswrapper[5133]: I1121 15:43:42.463356 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:43:42 crc kubenswrapper[5133]: E1121 15:43:42.464176 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:43:56 crc kubenswrapper[5133]: I1121 15:43:56.457747 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:43:56 crc kubenswrapper[5133]: E1121 15:43:56.458978 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:44:08 crc kubenswrapper[5133]: I1121 15:44:08.459718 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:44:08 crc kubenswrapper[5133]: E1121 15:44:08.460696 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:44:22 crc kubenswrapper[5133]: I1121 15:44:22.464918 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:44:22 crc kubenswrapper[5133]: E1121 15:44:22.465540 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:44:34 crc kubenswrapper[5133]: I1121 15:44:34.459348 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:44:34 crc kubenswrapper[5133]: E1121 15:44:34.461443 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:44:41 crc kubenswrapper[5133]: I1121 15:44:41.043802 5133 generic.go:334] "Generic (PLEG): container finished" podID="b2f02843-c088-41b6-832e-90acb53319aa" containerID="053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4" exitCode=0 Nov 21 15:44:41 crc kubenswrapper[5133]: I1121 15:44:41.044022 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-fft98/must-gather-c4tfn" event={"ID":"b2f02843-c088-41b6-832e-90acb53319aa","Type":"ContainerDied","Data":"053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4"} Nov 21 15:44:41 crc kubenswrapper[5133]: I1121 15:44:41.045289 5133 scope.go:117] "RemoveContainer" containerID="053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4" Nov 21 15:44:41 crc kubenswrapper[5133]: I1121 15:44:41.512174 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fft98_must-gather-c4tfn_b2f02843-c088-41b6-832e-90acb53319aa/gather/0.log" Nov 21 15:44:48 crc kubenswrapper[5133]: I1121 15:44:48.465881 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:44:48 crc kubenswrapper[5133]: E1121 15:44:48.466925 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.079054 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-fft98/must-gather-c4tfn"] Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 
15:44:50.079802 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-fft98/must-gather-c4tfn" podUID="b2f02843-c088-41b6-832e-90acb53319aa" containerName="copy" containerID="cri-o://cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6" gracePeriod=2 Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.092290 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-fft98/must-gather-c4tfn"] Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.562402 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fft98_must-gather-c4tfn_b2f02843-c088-41b6-832e-90acb53319aa/copy/0.log" Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.562932 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.632985 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b2f02843-c088-41b6-832e-90acb53319aa-must-gather-output\") pod \"b2f02843-c088-41b6-832e-90acb53319aa\" (UID: \"b2f02843-c088-41b6-832e-90acb53319aa\") " Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.633109 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bfh6\" (UniqueName: \"kubernetes.io/projected/b2f02843-c088-41b6-832e-90acb53319aa-kube-api-access-2bfh6\") pod \"b2f02843-c088-41b6-832e-90acb53319aa\" (UID: \"b2f02843-c088-41b6-832e-90acb53319aa\") " Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.645233 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2f02843-c088-41b6-832e-90acb53319aa-kube-api-access-2bfh6" (OuterVolumeSpecName: "kube-api-access-2bfh6") pod "b2f02843-c088-41b6-832e-90acb53319aa" (UID: "b2f02843-c088-41b6-832e-90acb53319aa"). InnerVolumeSpecName "kube-api-access-2bfh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.735677 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bfh6\" (UniqueName: \"kubernetes.io/projected/b2f02843-c088-41b6-832e-90acb53319aa-kube-api-access-2bfh6\") on node \"crc\" DevicePath \"\"" Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.815484 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2f02843-c088-41b6-832e-90acb53319aa-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "b2f02843-c088-41b6-832e-90acb53319aa" (UID: "b2f02843-c088-41b6-832e-90acb53319aa"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:44:50 crc kubenswrapper[5133]: I1121 15:44:50.838127 5133 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b2f02843-c088-41b6-832e-90acb53319aa-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 21 15:44:51 crc kubenswrapper[5133]: I1121 15:44:51.146490 5133 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-fft98_must-gather-c4tfn_b2f02843-c088-41b6-832e-90acb53319aa/copy/0.log" Nov 21 15:44:51 crc kubenswrapper[5133]: I1121 15:44:51.147456 5133 generic.go:334] "Generic (PLEG): container finished" podID="b2f02843-c088-41b6-832e-90acb53319aa" containerID="cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6" exitCode=143 Nov 21 15:44:51 crc kubenswrapper[5133]: I1121 15:44:51.147505 5133 scope.go:117] "RemoveContainer" containerID="cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6" Nov 21 15:44:51 crc kubenswrapper[5133]: I1121 15:44:51.147531 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-fft98/must-gather-c4tfn" Nov 21 15:44:51 crc kubenswrapper[5133]: I1121 15:44:51.181404 5133 scope.go:117] "RemoveContainer" containerID="053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4" Nov 21 15:44:51 crc kubenswrapper[5133]: I1121 15:44:51.278417 5133 scope.go:117] "RemoveContainer" containerID="cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6" Nov 21 15:44:51 crc kubenswrapper[5133]: E1121 15:44:51.279409 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6\": container with ID starting with cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6 not found: ID does not exist" containerID="cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6" Nov 21 15:44:51 crc kubenswrapper[5133]: I1121 15:44:51.279472 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6"} err="failed to get container status \"cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6\": rpc error: code = NotFound desc = could not find container \"cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6\": container with ID starting with cacfb1c0cc8a197a8ad52c6625a29748f6cd6fea086c11bc5bb5817cddd26dc6 not found: ID does not exist" Nov 21 15:44:51 crc kubenswrapper[5133]: I1121 15:44:51.279500 5133 scope.go:117] "RemoveContainer" containerID="053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4" Nov 21 15:44:51 crc kubenswrapper[5133]: E1121 15:44:51.280015 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4\": container with ID starting with 053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4 not found: ID does not exist" containerID="053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4" Nov 21 15:44:51 crc kubenswrapper[5133]: I1121 15:44:51.280069 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4"} err="failed to get container status 
\"053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4\": rpc error: code = NotFound desc = could not find container \"053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4\": container with ID starting with 053888ff03a7ce10b44412f548a0b8123200283b7d1e77859f325851df7f8db4 not found: ID does not exist" Nov 21 15:44:52 crc kubenswrapper[5133]: I1121 15:44:52.471093 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2f02843-c088-41b6-832e-90acb53319aa" path="/var/lib/kubelet/pods/b2f02843-c088-41b6-832e-90acb53319aa/volumes" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.186269 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585"] Nov 21 15:45:00 crc kubenswrapper[5133]: E1121 15:45:00.187571 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerName="extract-content" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.187595 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerName="extract-content" Nov 21 15:45:00 crc kubenswrapper[5133]: E1121 15:45:00.187614 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f02843-c088-41b6-832e-90acb53319aa" containerName="gather" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.187625 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f02843-c088-41b6-832e-90acb53319aa" containerName="gather" Nov 21 15:45:00 crc kubenswrapper[5133]: E1121 15:45:00.187648 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerName="extract-utilities" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.187662 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerName="extract-utilities" Nov 21 15:45:00 crc kubenswrapper[5133]: E1121 15:45:00.187686 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2f02843-c088-41b6-832e-90acb53319aa" containerName="copy" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.187696 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2f02843-c088-41b6-832e-90acb53319aa" containerName="copy" Nov 21 15:45:00 crc kubenswrapper[5133]: E1121 15:45:00.187737 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerName="registry-server" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.187747 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerName="registry-server" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.188140 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f02843-c088-41b6-832e-90acb53319aa" containerName="gather" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.188171 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2f02843-c088-41b6-832e-90acb53319aa" containerName="copy" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.188199 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcfe872e-7da5-448f-89b2-b6cecd870522" containerName="registry-server" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.189360 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.192338 5133 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.192881 5133 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.200209 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585"] Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.326076 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-secret-volume\") pod \"collect-profiles-29395665-9x585\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.326303 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-config-volume\") pod \"collect-profiles-29395665-9x585\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.326382 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zskc\" (UniqueName: \"kubernetes.io/projected/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-kube-api-access-8zskc\") pod \"collect-profiles-29395665-9x585\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.428262 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-config-volume\") pod \"collect-profiles-29395665-9x585\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.428365 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zskc\" (UniqueName: \"kubernetes.io/projected/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-kube-api-access-8zskc\") pod \"collect-profiles-29395665-9x585\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.428487 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-secret-volume\") pod \"collect-profiles-29395665-9x585\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.430633 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-config-volume\") pod 
\"collect-profiles-29395665-9x585\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.437370 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-secret-volume\") pod \"collect-profiles-29395665-9x585\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.447123 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zskc\" (UniqueName: \"kubernetes.io/projected/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-kube-api-access-8zskc\") pod \"collect-profiles-29395665-9x585\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.512648 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:00 crc kubenswrapper[5133]: I1121 15:45:00.974570 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585"] Nov 21 15:45:01 crc kubenswrapper[5133]: I1121 15:45:01.244426 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" event={"ID":"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0","Type":"ContainerStarted","Data":"4d3a8cdf71c2d1ea1f4a06a1fb5bf24043d400b0ddb916ae03ffa70ab17a049d"} Nov 21 15:45:01 crc kubenswrapper[5133]: I1121 15:45:01.244696 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" event={"ID":"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0","Type":"ContainerStarted","Data":"3d60061f940bf75fb1872f0b9d52bfef7e76dd03ec7462cd7d406e6f27317a40"} Nov 21 15:45:01 crc kubenswrapper[5133]: I1121 15:45:01.273948 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" podStartSLOduration=1.273919735 podStartE2EDuration="1.273919735s" podCreationTimestamp="2025-11-21 15:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-21 15:45:01.265712577 +0000 UTC m=+7361.063544835" watchObservedRunningTime="2025-11-21 15:45:01.273919735 +0000 UTC m=+7361.071751983" Nov 21 15:45:01 crc kubenswrapper[5133]: I1121 15:45:01.457620 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:45:01 crc kubenswrapper[5133]: E1121 15:45:01.457885 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:45:02 crc kubenswrapper[5133]: I1121 15:45:02.254107 5133 generic.go:334] "Generic (PLEG): container finished" podID="0bb4c161-26ff-4bb0-b3c3-bf4536a628f0" 
containerID="4d3a8cdf71c2d1ea1f4a06a1fb5bf24043d400b0ddb916ae03ffa70ab17a049d" exitCode=0 Nov 21 15:45:02 crc kubenswrapper[5133]: I1121 15:45:02.254191 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" event={"ID":"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0","Type":"ContainerDied","Data":"4d3a8cdf71c2d1ea1f4a06a1fb5bf24043d400b0ddb916ae03ffa70ab17a049d"} Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.579555 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.692303 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-secret-volume\") pod \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.692489 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zskc\" (UniqueName: \"kubernetes.io/projected/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-kube-api-access-8zskc\") pod \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.692539 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-config-volume\") pod \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\" (UID: \"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0\") " Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.693439 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-config-volume" (OuterVolumeSpecName: "config-volume") pod "0bb4c161-26ff-4bb0-b3c3-bf4536a628f0" (UID: "0bb4c161-26ff-4bb0-b3c3-bf4536a628f0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.698493 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0bb4c161-26ff-4bb0-b3c3-bf4536a628f0" (UID: "0bb4c161-26ff-4bb0-b3c3-bf4536a628f0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.698843 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-kube-api-access-8zskc" (OuterVolumeSpecName: "kube-api-access-8zskc") pod "0bb4c161-26ff-4bb0-b3c3-bf4536a628f0" (UID: "0bb4c161-26ff-4bb0-b3c3-bf4536a628f0"). InnerVolumeSpecName "kube-api-access-8zskc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.795077 5133 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-config-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.795107 5133 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:03 crc kubenswrapper[5133]: I1121 15:45:03.795117 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zskc\" (UniqueName: \"kubernetes.io/projected/0bb4c161-26ff-4bb0-b3c3-bf4536a628f0-kube-api-access-8zskc\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:04 crc kubenswrapper[5133]: I1121 15:45:04.275099 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" event={"ID":"0bb4c161-26ff-4bb0-b3c3-bf4536a628f0","Type":"ContainerDied","Data":"3d60061f940bf75fb1872f0b9d52bfef7e76dd03ec7462cd7d406e6f27317a40"} Nov 21 15:45:04 crc kubenswrapper[5133]: I1121 15:45:04.275415 5133 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d60061f940bf75fb1872f0b9d52bfef7e76dd03ec7462cd7d406e6f27317a40" Nov 21 15:45:04 crc kubenswrapper[5133]: I1121 15:45:04.275161 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29395665-9x585" Nov 21 15:45:04 crc kubenswrapper[5133]: I1121 15:45:04.363077 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl"] Nov 21 15:45:04 crc kubenswrapper[5133]: I1121 15:45:04.372419 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29395620-ntjcl"] Nov 21 15:45:04 crc kubenswrapper[5133]: I1121 15:45:04.470520 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="648c9cba-13d7-4f5d-a95e-b874c2a3ef62" path="/var/lib/kubelet/pods/648c9cba-13d7-4f5d-a95e-b874c2a3ef62/volumes" Nov 21 15:45:16 crc kubenswrapper[5133]: I1121 15:45:16.457932 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:45:16 crc kubenswrapper[5133]: E1121 15:45:16.458830 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.647237 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jlbfd"] Nov 21 15:45:22 crc kubenswrapper[5133]: E1121 15:45:22.659135 5133 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bb4c161-26ff-4bb0-b3c3-bf4536a628f0" containerName="collect-profiles" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.659189 5133 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bb4c161-26ff-4bb0-b3c3-bf4536a628f0" containerName="collect-profiles" Nov 21 15:45:22 crc 
kubenswrapper[5133]: I1121 15:45:22.659808 5133 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bb4c161-26ff-4bb0-b3c3-bf4536a628f0" containerName="collect-profiles" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.662770 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.704311 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jlbfd"] Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.790476 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-utilities\") pod \"redhat-marketplace-jlbfd\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.790547 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-catalog-content\") pod \"redhat-marketplace-jlbfd\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.790604 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpmhj\" (UniqueName: \"kubernetes.io/projected/d761b18b-4362-4856-bef0-629bdceb96ef-kube-api-access-mpmhj\") pod \"redhat-marketplace-jlbfd\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.892737 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-utilities\") pod \"redhat-marketplace-jlbfd\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.893249 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-catalog-content\") pod \"redhat-marketplace-jlbfd\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.893606 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpmhj\" (UniqueName: \"kubernetes.io/projected/d761b18b-4362-4856-bef0-629bdceb96ef-kube-api-access-mpmhj\") pod \"redhat-marketplace-jlbfd\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.893855 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-utilities\") pod \"redhat-marketplace-jlbfd\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.894219 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-catalog-content\") pod \"redhat-marketplace-jlbfd\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.916165 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpmhj\" (UniqueName: \"kubernetes.io/projected/d761b18b-4362-4856-bef0-629bdceb96ef-kube-api-access-mpmhj\") pod \"redhat-marketplace-jlbfd\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:22 crc kubenswrapper[5133]: I1121 15:45:22.985626 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:23 crc kubenswrapper[5133]: I1121 15:45:23.416737 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jlbfd"] Nov 21 15:45:23 crc kubenswrapper[5133]: I1121 15:45:23.449812 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jlbfd" event={"ID":"d761b18b-4362-4856-bef0-629bdceb96ef","Type":"ContainerStarted","Data":"e84bd70c929a97bcaa633099a58f33712951d836cc3811df7b795a45dab88339"} Nov 21 15:45:24 crc kubenswrapper[5133]: I1121 15:45:24.462490 5133 generic.go:334] "Generic (PLEG): container finished" podID="d761b18b-4362-4856-bef0-629bdceb96ef" containerID="af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f" exitCode=0 Nov 21 15:45:24 crc kubenswrapper[5133]: I1121 15:45:24.474720 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jlbfd" event={"ID":"d761b18b-4362-4856-bef0-629bdceb96ef","Type":"ContainerDied","Data":"af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f"} Nov 21 15:45:24 crc kubenswrapper[5133]: I1121 15:45:24.837098 5133 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tk4k4"] Nov 21 15:45:24 crc kubenswrapper[5133]: I1121 15:45:24.838822 5133 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:24 crc kubenswrapper[5133]: I1121 15:45:24.851842 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tk4k4"] Nov 21 15:45:24 crc kubenswrapper[5133]: I1121 15:45:24.934716 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-catalog-content\") pod \"community-operators-tk4k4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:24 crc kubenswrapper[5133]: I1121 15:45:24.934859 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz6cp\" (UniqueName: \"kubernetes.io/projected/f68120f6-173c-42db-937a-011a71913de4-kube-api-access-zz6cp\") pod \"community-operators-tk4k4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:24 crc kubenswrapper[5133]: I1121 15:45:24.934935 5133 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-utilities\") pod \"community-operators-tk4k4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:25 crc kubenswrapper[5133]: I1121 15:45:25.036267 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz6cp\" (UniqueName: \"kubernetes.io/projected/f68120f6-173c-42db-937a-011a71913de4-kube-api-access-zz6cp\") pod \"community-operators-tk4k4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:25 crc kubenswrapper[5133]: I1121 15:45:25.036399 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-utilities\") pod \"community-operators-tk4k4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:25 crc kubenswrapper[5133]: I1121 15:45:25.036510 5133 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-catalog-content\") pod \"community-operators-tk4k4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:25 crc kubenswrapper[5133]: I1121 15:45:25.036978 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-catalog-content\") pod \"community-operators-tk4k4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:25 crc kubenswrapper[5133]: I1121 15:45:25.037300 5133 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-utilities\") pod \"community-operators-tk4k4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:25 crc kubenswrapper[5133]: I1121 15:45:25.055871 5133 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zz6cp\" (UniqueName: \"kubernetes.io/projected/f68120f6-173c-42db-937a-011a71913de4-kube-api-access-zz6cp\") pod \"community-operators-tk4k4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:25 crc kubenswrapper[5133]: I1121 15:45:25.159047 5133 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:25 crc kubenswrapper[5133]: I1121 15:45:25.472769 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jlbfd" event={"ID":"d761b18b-4362-4856-bef0-629bdceb96ef","Type":"ContainerStarted","Data":"177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c"} Nov 21 15:45:25 crc kubenswrapper[5133]: I1121 15:45:25.648117 5133 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tk4k4"] Nov 21 15:45:25 crc kubenswrapper[5133]: W1121 15:45:25.652834 5133 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf68120f6_173c_42db_937a_011a71913de4.slice/crio-072fd8da5700681bb4c788739555f40ccfda5784b980e4d3a8951aca662145a5 WatchSource:0}: Error finding container 072fd8da5700681bb4c788739555f40ccfda5784b980e4d3a8951aca662145a5: Status 404 returned error can't find the container with id 072fd8da5700681bb4c788739555f40ccfda5784b980e4d3a8951aca662145a5 Nov 21 15:45:26 crc kubenswrapper[5133]: I1121 15:45:26.484406 5133 generic.go:334] "Generic (PLEG): container finished" podID="d761b18b-4362-4856-bef0-629bdceb96ef" containerID="177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c" exitCode=0 Nov 21 15:45:26 crc kubenswrapper[5133]: I1121 15:45:26.484502 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jlbfd" event={"ID":"d761b18b-4362-4856-bef0-629bdceb96ef","Type":"ContainerDied","Data":"177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c"} Nov 21 15:45:26 crc kubenswrapper[5133]: I1121 15:45:26.486769 5133 generic.go:334] "Generic (PLEG): container finished" podID="f68120f6-173c-42db-937a-011a71913de4" containerID="9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742" exitCode=0 Nov 21 15:45:26 crc kubenswrapper[5133]: I1121 15:45:26.486798 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk4k4" event={"ID":"f68120f6-173c-42db-937a-011a71913de4","Type":"ContainerDied","Data":"9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742"} Nov 21 15:45:26 crc kubenswrapper[5133]: I1121 15:45:26.486816 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk4k4" event={"ID":"f68120f6-173c-42db-937a-011a71913de4","Type":"ContainerStarted","Data":"072fd8da5700681bb4c788739555f40ccfda5784b980e4d3a8951aca662145a5"} Nov 21 15:45:27 crc kubenswrapper[5133]: I1121 15:45:27.499712 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jlbfd" event={"ID":"d761b18b-4362-4856-bef0-629bdceb96ef","Type":"ContainerStarted","Data":"98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f"} Nov 21 15:45:27 crc kubenswrapper[5133]: I1121 15:45:27.502379 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk4k4" 
event={"ID":"f68120f6-173c-42db-937a-011a71913de4","Type":"ContainerStarted","Data":"3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1"} Nov 21 15:45:27 crc kubenswrapper[5133]: I1121 15:45:27.524493 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jlbfd" podStartSLOduration=3.07447559 podStartE2EDuration="5.524476746s" podCreationTimestamp="2025-11-21 15:45:22 +0000 UTC" firstStartedPulling="2025-11-21 15:45:24.464706306 +0000 UTC m=+7384.262538554" lastFinishedPulling="2025-11-21 15:45:26.914707462 +0000 UTC m=+7386.712539710" observedRunningTime="2025-11-21 15:45:27.521149538 +0000 UTC m=+7387.318981816" watchObservedRunningTime="2025-11-21 15:45:27.524476746 +0000 UTC m=+7387.322309004" Nov 21 15:45:29 crc kubenswrapper[5133]: I1121 15:45:29.458050 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:45:29 crc kubenswrapper[5133]: E1121 15:45:29.459033 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:45:29 crc kubenswrapper[5133]: I1121 15:45:29.530434 5133 generic.go:334] "Generic (PLEG): container finished" podID="f68120f6-173c-42db-937a-011a71913de4" containerID="3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1" exitCode=0 Nov 21 15:45:29 crc kubenswrapper[5133]: I1121 15:45:29.530489 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk4k4" event={"ID":"f68120f6-173c-42db-937a-011a71913de4","Type":"ContainerDied","Data":"3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1"} Nov 21 15:45:30 crc kubenswrapper[5133]: I1121 15:45:30.543370 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk4k4" event={"ID":"f68120f6-173c-42db-937a-011a71913de4","Type":"ContainerStarted","Data":"5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6"} Nov 21 15:45:30 crc kubenswrapper[5133]: I1121 15:45:30.574301 5133 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tk4k4" podStartSLOduration=3.138231892 podStartE2EDuration="6.57427113s" podCreationTimestamp="2025-11-21 15:45:24 +0000 UTC" firstStartedPulling="2025-11-21 15:45:26.489328338 +0000 UTC m=+7386.287160586" lastFinishedPulling="2025-11-21 15:45:29.925367566 +0000 UTC m=+7389.723199824" observedRunningTime="2025-11-21 15:45:30.563835423 +0000 UTC m=+7390.361667711" watchObservedRunningTime="2025-11-21 15:45:30.57427113 +0000 UTC m=+7390.372103418" Nov 21 15:45:32 crc kubenswrapper[5133]: I1121 15:45:32.985863 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:32 crc kubenswrapper[5133]: I1121 15:45:32.985911 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:33 crc kubenswrapper[5133]: I1121 15:45:33.038772 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:33 crc kubenswrapper[5133]: I1121 15:45:33.630564 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:34 crc kubenswrapper[5133]: I1121 15:45:34.430633 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jlbfd"] Nov 21 15:45:35 crc kubenswrapper[5133]: I1121 15:45:35.159794 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:35 crc kubenswrapper[5133]: I1121 15:45:35.159982 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:35 crc kubenswrapper[5133]: I1121 15:45:35.249900 5133 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:35 crc kubenswrapper[5133]: I1121 15:45:35.615444 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jlbfd" podUID="d761b18b-4362-4856-bef0-629bdceb96ef" containerName="registry-server" containerID="cri-o://98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f" gracePeriod=2 Nov 21 15:45:35 crc kubenswrapper[5133]: I1121 15:45:35.682948 5133 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.081269 5133 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.152673 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-catalog-content\") pod \"d761b18b-4362-4856-bef0-629bdceb96ef\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.153113 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpmhj\" (UniqueName: \"kubernetes.io/projected/d761b18b-4362-4856-bef0-629bdceb96ef-kube-api-access-mpmhj\") pod \"d761b18b-4362-4856-bef0-629bdceb96ef\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.153253 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-utilities\") pod \"d761b18b-4362-4856-bef0-629bdceb96ef\" (UID: \"d761b18b-4362-4856-bef0-629bdceb96ef\") " Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.153852 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-utilities" (OuterVolumeSpecName: "utilities") pod "d761b18b-4362-4856-bef0-629bdceb96ef" (UID: "d761b18b-4362-4856-bef0-629bdceb96ef"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.154353 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.159596 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d761b18b-4362-4856-bef0-629bdceb96ef-kube-api-access-mpmhj" (OuterVolumeSpecName: "kube-api-access-mpmhj") pod "d761b18b-4362-4856-bef0-629bdceb96ef" (UID: "d761b18b-4362-4856-bef0-629bdceb96ef"). InnerVolumeSpecName "kube-api-access-mpmhj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.173991 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d761b18b-4362-4856-bef0-629bdceb96ef" (UID: "d761b18b-4362-4856-bef0-629bdceb96ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.257287 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpmhj\" (UniqueName: \"kubernetes.io/projected/d761b18b-4362-4856-bef0-629bdceb96ef-kube-api-access-mpmhj\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.257343 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d761b18b-4362-4856-bef0-629bdceb96ef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.625641 5133 generic.go:334] "Generic (PLEG): container finished" podID="d761b18b-4362-4856-bef0-629bdceb96ef" containerID="98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f" exitCode=0 Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.625717 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jlbfd" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.625745 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jlbfd" event={"ID":"d761b18b-4362-4856-bef0-629bdceb96ef","Type":"ContainerDied","Data":"98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f"} Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.626115 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jlbfd" event={"ID":"d761b18b-4362-4856-bef0-629bdceb96ef","Type":"ContainerDied","Data":"e84bd70c929a97bcaa633099a58f33712951d836cc3811df7b795a45dab88339"} Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.626142 5133 scope.go:117] "RemoveContainer" containerID="98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.652327 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jlbfd"] Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.654261 5133 scope.go:117] "RemoveContainer" containerID="177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.660500 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jlbfd"] Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.682720 5133 scope.go:117] "RemoveContainer" containerID="af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.731524 5133 scope.go:117] "RemoveContainer" containerID="98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f" Nov 21 15:45:36 crc kubenswrapper[5133]: E1121 15:45:36.732063 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f\": container with ID starting with 98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f not found: ID does not exist" containerID="98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.732115 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f"} err="failed to get container status \"98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f\": rpc error: code = NotFound desc = could not find container \"98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f\": container with ID starting with 98ea921f05e4209ab11924994e1e7f5a4d15e5b767ed7cf0da7507f5eda6b01f not found: ID does not exist" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.732143 5133 scope.go:117] "RemoveContainer" containerID="177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c" Nov 21 15:45:36 crc kubenswrapper[5133]: E1121 15:45:36.732602 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c\": container with ID starting with 177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c not found: ID does not exist" containerID="177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.732650 5133 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c"} err="failed to get container status \"177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c\": rpc error: code = NotFound desc = could not find container \"177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c\": container with ID starting with 177327ee383e2c6b07d869857851bdc35038b780f315a124f4e79fbcab07ef7c not found: ID does not exist" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.732678 5133 scope.go:117] "RemoveContainer" containerID="af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f" Nov 21 15:45:36 crc kubenswrapper[5133]: E1121 15:45:36.733062 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f\": container with ID starting with af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f not found: ID does not exist" containerID="af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f" Nov 21 15:45:36 crc kubenswrapper[5133]: I1121 15:45:36.733109 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f"} err="failed to get container status \"af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f\": rpc error: code = NotFound desc = could not find container \"af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f\": container with ID starting with af3622c2b65b65fd7b40fcfaeffad85168e52f61c373f79c5d75619f7988d19f not found: ID does not exist" Nov 21 15:45:37 crc kubenswrapper[5133]: I1121 15:45:37.439319 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tk4k4"] Nov 21 15:45:38 crc kubenswrapper[5133]: I1121 15:45:38.470198 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d761b18b-4362-4856-bef0-629bdceb96ef" path="/var/lib/kubelet/pods/d761b18b-4362-4856-bef0-629bdceb96ef/volumes" Nov 21 15:45:38 crc kubenswrapper[5133]: I1121 15:45:38.645908 5133 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tk4k4" podUID="f68120f6-173c-42db-937a-011a71913de4" containerName="registry-server" containerID="cri-o://5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6" gracePeriod=2 Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.186083 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.324656 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zz6cp\" (UniqueName: \"kubernetes.io/projected/f68120f6-173c-42db-937a-011a71913de4-kube-api-access-zz6cp\") pod \"f68120f6-173c-42db-937a-011a71913de4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.324794 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-utilities\") pod \"f68120f6-173c-42db-937a-011a71913de4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.324906 5133 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-catalog-content\") pod \"f68120f6-173c-42db-937a-011a71913de4\" (UID: \"f68120f6-173c-42db-937a-011a71913de4\") " Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.325607 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-utilities" (OuterVolumeSpecName: "utilities") pod "f68120f6-173c-42db-937a-011a71913de4" (UID: "f68120f6-173c-42db-937a-011a71913de4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.329916 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f68120f6-173c-42db-937a-011a71913de4-kube-api-access-zz6cp" (OuterVolumeSpecName: "kube-api-access-zz6cp") pod "f68120f6-173c-42db-937a-011a71913de4" (UID: "f68120f6-173c-42db-937a-011a71913de4"). InnerVolumeSpecName "kube-api-access-zz6cp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.391592 5133 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f68120f6-173c-42db-937a-011a71913de4" (UID: "f68120f6-173c-42db-937a-011a71913de4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.427059 5133 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zz6cp\" (UniqueName: \"kubernetes.io/projected/f68120f6-173c-42db-937a-011a71913de4-kube-api-access-zz6cp\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.427089 5133 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-utilities\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.427098 5133 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f68120f6-173c-42db-937a-011a71913de4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.657766 5133 generic.go:334] "Generic (PLEG): container finished" podID="f68120f6-173c-42db-937a-011a71913de4" containerID="5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6" exitCode=0 Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.657817 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk4k4" event={"ID":"f68120f6-173c-42db-937a-011a71913de4","Type":"ContainerDied","Data":"5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6"} Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.657849 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tk4k4" event={"ID":"f68120f6-173c-42db-937a-011a71913de4","Type":"ContainerDied","Data":"072fd8da5700681bb4c788739555f40ccfda5784b980e4d3a8951aca662145a5"} Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.657853 5133 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tk4k4" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.657871 5133 scope.go:117] "RemoveContainer" containerID="5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.687759 5133 scope.go:117] "RemoveContainer" containerID="3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.707857 5133 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tk4k4"] Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.715447 5133 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tk4k4"] Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.734325 5133 scope.go:117] "RemoveContainer" containerID="9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.766418 5133 scope.go:117] "RemoveContainer" containerID="5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6" Nov 21 15:45:39 crc kubenswrapper[5133]: E1121 15:45:39.767038 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6\": container with ID starting with 5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6 not found: ID does not exist" containerID="5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.767119 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6"} err="failed to get container status \"5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6\": rpc error: code = NotFound desc = could not find container \"5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6\": container with ID starting with 5cfe5af0c74a12157da4d4d61363643a26dd78f8232a68076825c9a7bca31df6 not found: ID does not exist" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.767157 5133 scope.go:117] "RemoveContainer" containerID="3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1" Nov 21 15:45:39 crc kubenswrapper[5133]: E1121 15:45:39.767743 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1\": container with ID starting with 3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1 not found: ID does not exist" containerID="3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.767771 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1"} err="failed to get container status \"3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1\": rpc error: code = NotFound desc = could not find container \"3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1\": container with ID starting with 3cca6cf78742599bacc37bf1dbbd2281cb8dd05238a99af0ef1337d12b13e3a1 not found: ID does not exist" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.767785 5133 scope.go:117] "RemoveContainer" 
containerID="9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742" Nov 21 15:45:39 crc kubenswrapper[5133]: E1121 15:45:39.768035 5133 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742\": container with ID starting with 9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742 not found: ID does not exist" containerID="9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742" Nov 21 15:45:39 crc kubenswrapper[5133]: I1121 15:45:39.768158 5133 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742"} err="failed to get container status \"9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742\": rpc error: code = NotFound desc = could not find container \"9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742\": container with ID starting with 9e93eb55e7c145b56c21219f95b9be2bd8f6d365383e2ea88725c766602a6742 not found: ID does not exist" Nov 21 15:45:40 crc kubenswrapper[5133]: I1121 15:45:40.458090 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:45:40 crc kubenswrapper[5133]: E1121 15:45:40.458666 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:45:40 crc kubenswrapper[5133]: I1121 15:45:40.470126 5133 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f68120f6-173c-42db-937a-011a71913de4" path="/var/lib/kubelet/pods/f68120f6-173c-42db-937a-011a71913de4/volumes" Nov 21 15:45:44 crc kubenswrapper[5133]: I1121 15:45:44.134770 5133 scope.go:117] "RemoveContainer" containerID="29eadaabc384b5aed5fcb68407e1fefbbb708de17168627976517f9c2fe0983e" Nov 21 15:45:44 crc kubenswrapper[5133]: I1121 15:45:44.156119 5133 scope.go:117] "RemoveContainer" containerID="afbc9671d28b4dd06353edf7f13cb95fff7c848c57287ceb5e800d0d56ef250c" Nov 21 15:45:51 crc kubenswrapper[5133]: I1121 15:45:51.458271 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:45:51 crc kubenswrapper[5133]: E1121 15:45:51.459468 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:46:05 crc kubenswrapper[5133]: I1121 15:46:05.457633 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:46:05 crc kubenswrapper[5133]: E1121 15:46:05.458704 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:46:20 crc kubenswrapper[5133]: I1121 15:46:20.458522 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:46:20 crc kubenswrapper[5133]: E1121 15:46:20.459518 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:46:34 crc kubenswrapper[5133]: I1121 15:46:34.457949 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:46:34 crc kubenswrapper[5133]: E1121 15:46:34.458738 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:46:44 crc kubenswrapper[5133]: I1121 15:46:44.291334 5133 scope.go:117] "RemoveContainer" containerID="e3544c0d89c702117beff1d3a42e9534916a377f07bf5219b324e7880f4466a9" Nov 21 15:46:46 crc kubenswrapper[5133]: I1121 15:46:46.458279 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:46:46 crc kubenswrapper[5133]: E1121 15:46:46.458924 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:46:59 crc kubenswrapper[5133]: I1121 15:46:59.458191 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:46:59 crc kubenswrapper[5133]: E1121 15:46:59.459123 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:47:14 crc kubenswrapper[5133]: I1121 15:47:14.458656 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:47:14 crc kubenswrapper[5133]: E1121 15:47:14.459591 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:47:25 crc kubenswrapper[5133]: I1121 15:47:25.458393 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:47:25 crc kubenswrapper[5133]: E1121 15:47:25.459400 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:47:36 crc kubenswrapper[5133]: I1121 15:47:36.458151 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:47:36 crc kubenswrapper[5133]: E1121 15:47:36.459074 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:47:48 crc kubenswrapper[5133]: I1121 15:47:48.458083 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:47:48 crc kubenswrapper[5133]: E1121 15:47:48.459112 5133 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xxlvp_openshift-machine-config-operator(52f5a729-05d1-4f84-a216-1df3233af57d)\"" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" podUID="52f5a729-05d1-4f84-a216-1df3233af57d" Nov 21 15:48:00 crc kubenswrapper[5133]: I1121 15:48:00.457745 5133 scope.go:117] "RemoveContainer" containerID="b58db206d21a207a041b6beb15809267620ac7e9bf4cc60f4d82a43276d4adbd" Nov 21 15:48:01 crc kubenswrapper[5133]: I1121 15:48:01.268895 5133 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xxlvp" event={"ID":"52f5a729-05d1-4f84-a216-1df3233af57d","Type":"ContainerStarted","Data":"b7fac6938257da821475283f5c7e2665330e30e78abaaaec95648ed3ef6938bd"}
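
The entries above trace one complete CrashLoopBackOff cycle for machine-config-daemon-xxlvp: the kubelet repeatedly queues the dead container b58db206... for removal, skips each sync with a capped 5m0s back-off, and the replacement container finally starts at 15:48:01. The following is a minimal sketch, not part of the captured log, for summarizing that retry timeline. It assumes the journal has been exported to a local file named kubelet.log (hypothetical path) with one kubelet entry per line, and uses only fields visible in the entries above.

#!/usr/bin/env python3
# Sketch only: summarize CrashLoopBackOff retries for one pod from a saved kubelet log.
# Assumptions: the log is at "kubelet.log" (hypothetical path) and each kubelet entry
# is on its own line, as in the original journal.
import re

POD = "machine-config-daemon-xxlvp"  # pod name taken from the entries above
STAMP = re.compile(r"^(\w{3} {1,2}\d+ \d{2}:\d{2}:\d{2})")  # e.g. "Nov 21 15:47:25"

retries, started = [], []
with open("kubelet.log") as log:
    for line in log:
        match = STAMP.match(line)
        if not match or POD not in line:
            continue
        if "CrashLoopBackOff" in line:
            retries.append(match.group(1))   # a sync attempt skipped due to back-off
        elif '"ContainerStarted"' in line:
            started.append(match.group(1))   # the container came back up

print(f"{len(retries)} back-off retries for {POD}")
if retries:
    print(f"first retry {retries[0]}, last retry {retries[-1]}")
for ts in started:
    print(f"container started again at {ts}")

Run as "python3 backoff_summary.py" (any file name works); the pod name and log path are the only inputs and can be swapped for any other pod that appears in this log.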